IVGCVSW-3553 Fix failing zero_sized tests

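* Renamed IsDynamicOutput to IsDynamicTensor and moved it from OutputShapeUtils to Utils
* Bail out early in ConvertToLayerInputHandle when the input operand is a dynamic tensor, as dynamic input tensors are not supported
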
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: Idd10f34babc0d2552d599872b853ba5fb5c98351
diff --git a/1.0/HalPolicy.cpp b/1.0/HalPolicy.cpp
index 8c61700..a2c8252 100644
--- a/1.0/HalPolicy.cpp
+++ b/1.0/HalPolicy.cpp
@@ -9,6 +9,7 @@
 
 #include "FullyConnected.hpp"
 #include "OutputShapeUtils.hpp"
+#include "Utils.hpp"
 
 namespace armnn_driver
 {
@@ -397,7 +398,7 @@
     }
 
     armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
-    if (IsDynamicOutput(outputInfo))
+    if (IsDynamicTensor(outputInfo))
     {
         ALOGD("Output shape not set, will infer from input");
         outputInfo.SetShape(input.GetTensorInfo().GetShape());
@@ -477,7 +478,7 @@
     const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
     armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
 
-    if (IsDynamicOutput(outputInfo))
+    if (IsDynamicTensor(outputInfo))
     {
         ALOGD("Output shape not set, will infer from inputs");
         outputInfo.SetShape(inputInfo.GetShape());
@@ -1004,7 +1005,7 @@
     const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
     armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
 
-    if (IsDynamicOutput(outputInfo))
+    if (IsDynamicTensor(outputInfo))
     {
         ALOGD("Output shape not set, will infer from inputs");
         outputInfo.SetShape(inputInfo.GetShape());
@@ -1147,7 +1148,7 @@
     }
 
     armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
-    if (IsDynamicOutput(outputInfo))
+    if (IsDynamicTensor(outputInfo))
     {
         ALOGD("Output shape not set, will infer from input");
         outputInfo.SetShape(input.GetTensorInfo().GetShape());
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index d7f4bbb..6687b12 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -6,6 +6,7 @@
 #include "HalPolicy.hpp"
 
 #include "OutputShapeUtils.hpp"
+#include "Utils.hpp"
 
 #include "../1.0/HalPolicy.hpp"
 
@@ -182,7 +183,7 @@
     }
 
     armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
-    if (IsDynamicOutput(outputInfo))
+    if (IsDynamicTensor(outputInfo))
     {
         ALOGD("Output shape not set, will infer from inputs");
         outputInfo.SetShape(InferSubOutputShape(input0.GetTensorInfo().GetShape(), input1.GetTensorInfo().GetShape()));
@@ -313,7 +314,7 @@
     }
 
     armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
-    if (IsDynamicOutput(outputInfo))
+    if (IsDynamicTensor(outputInfo))
     {
         ALOGD("Output shape not set, will infer from inputs");
         outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList));
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 3c00388..f93629e 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -6,6 +6,7 @@
 #include "HalPolicy.hpp"
 
 #include "OutputShapeUtils.hpp"
+#include "Utils.hpp"
 
 #include "../1.0/HalPolicy.hpp"
 #include "../1.1/HalPolicy.hpp"
@@ -270,7 +271,7 @@
     desc.m_BiasEnabled = true;
     armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
 
-    if (IsDynamicOutput(outputInfo))
+    if (IsDynamicTensor(outputInfo))
     {
         try
         {
@@ -450,7 +451,7 @@
     armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
 
     armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
-    if (IsDynamicOutput(outputInfo))
+    if (IsDynamicTensor(outputInfo))
     {
         try
         {
@@ -522,7 +523,7 @@
     }
 
     armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
-    if (IsDynamicOutput(outInfo))
+    if (IsDynamicTensor(outInfo))
     {
         ALOGD("Output shape not set, will infer from inputs");
         outInfo.SetShape(InferMaximumOutputShape(input0.GetTensorInfo().GetShape(), input1.GetTensorInfo().GetShape()));
@@ -571,7 +572,7 @@
     }
 
     armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
-    if (IsDynamicOutput(outputInfo))
+    if (IsDynamicTensor(outputInfo))
     {
         ALOGD("Output shape not set, will infer from inputs");
         outputInfo.SetShape(InferMinimumOutputShape(input0.GetTensorInfo().GetShape(),
@@ -628,7 +629,7 @@
     }
 
     armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
-    if (IsDynamicOutput(outputInfo))
+    if (IsDynamicTensor(outputInfo))
     {
         ALOGD("Output shape not set, will infer from inputs");
         outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList));
@@ -726,7 +727,7 @@
     const armnn::TensorInfo& alphaInfo  = alpha.GetTensorInfo();
 
     armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
-    if (IsDynamicOutput(outputInfo))
+    if (IsDynamicTensor(outputInfo))
     {
         ALOGD("Output shape not set, will infer from inputs");
         outputInfo.SetShape(InferPreluOutputShape(inputInfo.GetShape(), alphaInfo.GetShape()));
@@ -848,7 +849,7 @@
         return false;
     }
 
-    if (IsDynamicOutput(outputInfo))
+    if (IsDynamicTensor(outputInfo))
     {
         try
         {
@@ -961,7 +962,7 @@
     }
 
     armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
-    if (IsDynamicOutput(outputInfo))
+    if (IsDynamicTensor(outputInfo))
     {
         ALOGD("Output shape not set, will infer from input");
         outputInfo.SetShape(input.GetTensorInfo().GetShape());
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 8eb48fe..755e3be 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -974,6 +974,11 @@
     try
     {
         armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
+        if (IsDynamicTensor(operandTensorInfo))
+        {
+            Fail("%s: dynamic input tensors are not supported", __func__);
+            return LayerInputHandle();
+        }
 
         switch (operand->lifetime)
         {
diff --git a/OutputShapeUtils.cpp b/OutputShapeUtils.cpp
index e3812a3..0c897d1 100644
--- a/OutputShapeUtils.cpp
+++ b/OutputShapeUtils.cpp
@@ -96,11 +96,6 @@
 
 using namespace armnn;
 
-bool IsDynamicOutput(const TensorInfo& outputInfo)
-{
-    return outputInfo.GetNumElements() == 0u;
-}
-
 TensorShape InferConvolution2dOutputShape(const TensorShape& inputShape,
                                           const TensorShape& kernelShape,
                                           const Convolution2dDescriptor& descriptor)
@@ -177,4 +172,4 @@
     return CalculateMaxShape(input0Shape, input1Shape);
 }
 
-} // namespace armnn_driver
\ No newline at end of file
+} // namespace armnn_driver
diff --git a/OutputShapeUtils.hpp b/OutputShapeUtils.hpp
index 7452ced..222c123 100644
--- a/OutputShapeUtils.hpp
+++ b/OutputShapeUtils.hpp
@@ -10,8 +10,6 @@
 namespace armnn_driver
 {
 
-bool IsDynamicOutput(const armnn::TensorInfo& outputInfo);
-
 armnn::TensorShape InferConvolution2dOutputShape(const armnn::TensorShape& inputShape,
                                                  const armnn::TensorShape& kernelShape,
                                                  const armnn::Convolution2dDescriptor& descriptor);
@@ -37,5 +35,3 @@
 armnn::TensorShape InferSubOutputShape(const armnn::TensorShape& input0Shape, const armnn::TensorShape& input1Shape);
 
 } // namespace armnn_driver
-
-
diff --git a/Utils.cpp b/Utils.cpp
index c3c6310..d3d62a0 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -328,4 +328,10 @@
     profiler->Print(fileStream);
 }
 
+bool IsDynamicTensor(const armnn::TensorInfo& outputInfo)
+{
+    // Dynamic tensors have at least one 0-sized dimension
+    return outputInfo.GetNumElements() == 0u;
+}
+
 } // namespace armnn_driver
diff --git a/Utils.hpp b/Utils.hpp
index 5aac471..267e519 100644
--- a/Utils.hpp
+++ b/Utils.hpp
@@ -144,4 +144,7 @@
     }
 }
 
+/// Checks if a tensor info represents a dynamic tensor
+bool IsDynamicTensor(const armnn::TensorInfo& outputInfo);
+
 } // namespace armnn_driver