IVGCVSW-3530 Fix DynamicOutput Tests for Android Q NeuralNetworks 1.0 & 1.1

 * Updated ConvertToActivation, ConvertConv2d, and ConvertDepthwiseConv2d functions
   to infer output shape from input if it is dynamic

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Ie24fbfd87c6186c69c3ecba5c68a6866507fb449
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 755e3be..5ebec6b 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -5,6 +5,7 @@
 
 #pragma once
 
+#include "OutputShapeUtils.hpp"
 #include "Utils.hpp"
 
 #include <armnn/ArmNN.hpp>
@@ -1157,7 +1158,12 @@
     {
         return false;
     }
-    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
+    armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
+    if (IsDynamicTensor(outInfo))
+    {
+        ALOGD("Output shape not set, will infer from input");
+        outInfo.SetShape(input.GetTensorInfo().GetShape());
+    }
 
     bool isSupported = false;
     FORWARD_LAYER_SUPPORT_FUNC(__func__,
@@ -1176,7 +1182,11 @@
     BOOST_ASSERT(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
-    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
+                                                   0,
+                                                   *layer,
+                                                   model,
+                                                   data, armnn::Optional<armnn::TensorInfo>(outInfo));
 }
 
 template<typename HalPolicy,
@@ -1344,7 +1354,7 @@
     }
 
     const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
 
     // ArmNN does not currently support non-fixed weights or bias
     const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
@@ -1408,6 +1418,21 @@
     desc.m_BiasEnabled = true;
     armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
 
+    if (IsDynamicTensor(outputInfo))
+    {
+        try
+        {
+            ALOGD("Output shape not set, will infer from inputs");
+            outputInfo.SetShape(InferConvolution2dOutputShape(inputInfo.GetShape(),
+                                                              weights.GetInfo().GetShape(),
+                                                              desc));
+        }
+        catch (armnn::Exception& e)
+        {
+            return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
+        }
+    }
+
     bool isSupported = false;
     FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                IsConvolution2dSupported,
@@ -1440,7 +1465,12 @@
 
     input.Connect(startLayer->GetInputSlot(0));
 
-    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
+                                                   0,
+                                                   *endLayer,
+                                                   model,
+                                                   data,
+                                                   armnn::Optional<armnn::TensorInfo>(outputInfo));
 }
 
 template<typename HalPolicy,
@@ -1466,7 +1496,7 @@
     }
 
     const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
 
     // ArmNN does not currently support non-fixed weights or bias
 
@@ -1568,6 +1598,21 @@
     desc.m_BiasEnabled = true;
     armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
 
+    if (IsDynamicTensor(outputInfo))
+    {
+        try
+        {
+            ALOGD("Output shape not set, will infer from inputs");
+            outputInfo.SetShape(InferDepthwiseConvolution2dOutputShape(inputInfo.GetShape(),
+                                                                       weights.GetInfo().GetShape(),
+                                                                       desc));
+        }
+        catch (armnn::Exception& e)
+        {
+            return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
+        }
+    }
+
     bool isSupported = false;
     FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                IsDepthwiseConvolutionSupported,
@@ -1598,7 +1643,12 @@
 
     input.Connect(startLayer->GetInputSlot(0));
 
-    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
+                                                   0,
+                                                   *endLayer,
+                                                   model,
+                                                   data,
+                                                   armnn::Optional<armnn::TensorInfo>(outputInfo));
 }
 
 } // namespace armnn_driver