IVGCVSW-3578 Do not attempt to infer dynamic output shapes

* Report dynamic output tensors as unsupported for all operations,
  regardless of HAL level
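
Every converter now validates its output operand before any descriptor
parsing, using the same guard (shown here as it appears in the
ConvertDiv hunk below):

    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

IsDynamicTensor() is an existing driver helper (presumably from
Utils.hpp, which stays included). As a rough sketch of its intent only,
assuming a dynamic output is encoded as a shape with an unspecified,
zero-sized dimension (not necessarily the actual implementation):

    // Hypothetical sketch of IsDynamicTensor, for illustration only
    bool IsDynamicTensor(const armnn::TensorInfo& tensorInfo)
    {
        // A shape with an unspecified (zero) dimension yields zero elements
        return tensorInfo.GetNumElements() == 0u;
    }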

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I2341dc96be965886666b75515e9a226d813a1591
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index bbd289e..ab8224a 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -5,7 +5,6 @@
 
 #include "HalPolicy.hpp"
 
-#include "OutputShapeUtils.hpp"
 #include "Utils.hpp"
 
 #include "../1.0/HalPolicy.hpp"
@@ -124,13 +123,18 @@
         return Fail("%s: Operation has invalid inputs", __func__);
     }
 
-    const Operand* outputOperand = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
-    if (!outputOperand)
+    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
+    if (!output)
     {
-        return false;
+        return Fail("%s: Could not read output 0", __func__);
     }
 
-    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
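+    // Reject dynamic outputs up front instead of trying to infer their shape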
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
 
     bool isSupported = false;
     FORWARD_LAYER_SUPPORT_FUNC(__func__,
@@ -139,14 +142,14 @@
                                isSupported,
                                input0.GetTensorInfo(),
                                input1.GetTensorInfo(),
-                               outInfo);
+                               outputInfo);
     if (!isSupported)
     {
         return false;
     }
 
     armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
-    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
+    armnn::IConnectableLayer* const endLayer   = ProcessActivation(outputInfo, activationFunction, startLayer, data);
 
     const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
     const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
@@ -180,17 +183,16 @@
         return Fail("%s: Operation has invalid inputs", __func__);
     }
 
-    const Operand* outputOperand = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
-    if (!outputOperand)
+    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
+    if (!output)
     {
-        return false;
+        return Fail("%s: Could not read output 0", __func__);
     }
 
-    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
     if (IsDynamicTensor(outputInfo))
     {
-        ALOGD("Output shape not set, will infer from inputs");
-        outputInfo.SetShape(InferSubOutputShape(input0.GetTensorInfo().GetShape(), input1.GetTensorInfo().GetShape()));
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
     }
 
     bool isSupported = false;
@@ -215,12 +217,8 @@
     if (endLayer)
     {
         BroadcastTensor(input0, input1, startLayer, *data.m_Network);
-        return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation,
-                                                                0,
-                                                                *endLayer,
-                                                                model,
-                                                                data,
-                                                                armnn::Optional<armnn::TensorInfo>(outputInfo));
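+        // Output shape is static (checked above); the inferred-shape TensorInfo override is gone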
+        return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *endLayer, model, data);
     }
 
     return Fail("%s: ProcessActivation failed", __func__);
@@ -236,6 +233,18 @@
         return Fail("%s: Operation has invalid inputs", __func__);
     }
 
+    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
     const Operand* axisOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
     if (!axisOperand)
     {
@@ -268,14 +277,6 @@
     descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
     descriptor.m_KeepDims = keepDims > 0;
 
-    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
     bool isSupported = false;
     FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                IsMeanSupported,
@@ -321,6 +322,18 @@
         Fail("%s: Only inputs with rank 4 are supported", __func__);
     }
 
+    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
     const Operand* blockShapeOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
     const Operand* paddingsOperand   = GetInputOperand<hal_1_1::HalPolicy>(operation, 2, model);
 
@@ -363,14 +376,6 @@
     descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
     descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
 
-    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
     bool isSupported = false;
     FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                IsSpaceToBatchNdSupported,
@@ -402,13 +407,23 @@
     }
 
     const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
-
     unsigned int rank = inputInfo.GetNumDimensions();
     if (rank > 4)
     {
         Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
     }
 
+    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
     // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
     // if the operand index is out of bounds.
     const Operand* axisOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model, false);
@@ -446,12 +461,6 @@
     armnn::ReshapeDescriptor reshapeDesc;
     reshapeDesc.m_TargetShape = outputInfo.GetShape();
 
-    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
     bool isSupported = false;
     FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                IsReshapeSupported,
@@ -488,6 +497,18 @@
         Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
     }
 
+    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
     const Operand* beginOperand   = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
     const Operand* endOperand     = GetInputOperand<hal_1_1::HalPolicy>(operation, 2, model);
     const Operand* stridesOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 3, model);
@@ -539,13 +560,6 @@
         return Fail("%s: Operation has invalid inputs", __func__);
     }
 
-    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
     bool isSupported = false;
     FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                IsStridedSliceSupported,
@@ -652,6 +666,18 @@
         return Fail("%s: Operation has invalid inputs", __func__);
     }
 
+    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
     const Operand* blockOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
     if (!blockOperand)
     {
@@ -686,14 +712,6 @@
     // Setting crops to {{0, 0}, {0, 0}} as crops are not supported by the Android NN API
     batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
 
-    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
     bool isSupported = false;
     FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                IsBatchToSpaceNdSupported,