IVGCVSW-3482 Report operations with dynamic output size as unsupported

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: Ifafe2a6fbfd6019b3395d51ed9967db794d2b034
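
Below is a minimal sketch of the check the hunks in this patch rely on. It
assumes that the IsDynamicOutput helper (expected to come from the newly
included OutputShapeUtils.hpp) treats an output tensor as dynamic when its
shape is not fully specified, which the NNAPI reports as dimensions of size 0:

    // Sketch only, not the actual OutputShapeUtils.hpp implementation:
    // a tensor with any unspecified (zero-sized) dimension has zero elements.
    bool IsDynamicOutput(const armnn::TensorInfo& outputInfo)
    {
        return outputInfo.GetNumElements() == 0u;
    }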
diff --git a/1.0/HalPolicy.cpp b/1.0/HalPolicy.cpp
index 9673a74..2149d40 100644
--- a/1.0/HalPolicy.cpp
+++ b/1.0/HalPolicy.cpp
@@ -8,6 +8,7 @@
 #include <armnn/Optional.hpp>
 
 #include "FullyConnected.hpp"
+#include "OutputShapeUtils.hpp"
 
 namespace armnn_driver
 {
@@ -388,11 +389,17 @@
         return Fail("%s: Operation has invalid outputs", __func__);
     }
 
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+    if (IsDynamicOutput(outputInfo))
+    {
+        return Fail("%s: Dynamic output not supported", __func__);
+    }
+
     if (!IsLayerSupportedForAnyBackend(__func__,
                                        armnn::IsDequantizeSupported,
                                        data.m_Backends,
                                        input.GetTensorInfo(),
-                                       GetTensorInfoForOperand(*outputOperand)))
+                                       outputInfo))
     {
         return false;
     }
@@ -957,6 +964,11 @@
     const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
 
+    if (IsDynamicOutput(outputInfo))
+    {
+        return Fail("%s: Dynamic output not supported", __func__);
+    }
+
     armnn::L2NormalizationDescriptor desc;
     desc.m_DataLayout = armnn::DataLayout::NHWC;
 
@@ -1082,7 +1094,11 @@
         return Fail("%s: Operation has no outputs", __func__);
     }
 
-    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
+    const armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
+    if (IsDynamicOutput(outputInfo))
+    {
+        return Fail("%s: Dynamic output not supported", __func__);
+    }
 
     armnn::SoftmaxDescriptor desc;
     if (!GetInputFloat32<hal_1_0::HalPolicy>(operation, 1, desc.m_Beta, model, data))
@@ -1094,7 +1110,7 @@
                                        armnn::IsSoftmaxSupported,
                                        data.m_Backends,
                                        input.GetTensorInfo(),
-                                       outInfo,
+                                       outputInfo,
                                        desc))
     {
         return false;