IVGCVSW-3530 Fix DynamicOutput Tests for Android Q NeuralNetworks 1.0 & 1.1

 * Fixed failing Conv2d, DepthwiseConv2d, and Activation tests on HAL 1.0 and 1.1 in Android Q (the resulting version gate is sketched below)
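
In short: the activation converters (ReLu, ReLu1, ReLu6, TanH) move into
HalPolicy-templated helpers in ConversionUtils.hpp so that the HAL 1.0 and
1.2 policies share one implementation, and ConvertToActivation() now only
infers a missing output shape when the output operand is a V1_2 operand.
A condensed sketch of the new version gate, simplified from the full
templated code in the diff below:

    // Overload resolution tells the converters which HAL version an operand
    // comes from: V1_0 operands report false, and the V1_2 overload is only
    // compiled when the driver is built with ARMNN_ANDROID_NN_V1_2.
    inline bool Is12Operand(V1_0::Operand) { return false; }

    #ifdef ARMNN_ANDROID_NN_V1_2
    inline bool Is12Operand(V1_2::Operand) { return true; }
    #endif

    // Inside ConvertToActivation(): only HAL 1.2 may leave the output shape
    // unset, in which case it is inferred from the input tensor.
    if (IsDynamicTensor(outInfo))
    {
        if (Is12Operand(*outputOperand))
        {
            outInfo.SetShape(input.GetTensorInfo().GetShape());
        }
        else
        {
            return Fail("%s: Dynamic OutputShapes are not supported in this HAL version", __func__);
        }
    }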

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Signed-off-by: Aron Virginas-Tar <aron.virginas-tar@arm.com>
Change-Id: I435338b90b6c501320083f2fd9372e3a4ac3c32c
diff --git a/1.0/HalPolicy.cpp b/1.0/HalPolicy.cpp
index b9200f1..7a54e74 100644
--- a/1.0/HalPolicy.cpp
+++ b/1.0/HalPolicy.cpp
@@ -1142,34 +1142,19 @@
 bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_0::HalPolicy::ConvertReLu()");
-
-    armnn::ActivationDescriptor desc;
-    desc.m_Function = armnn::ActivationFunction::ReLu;
-
-    return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
+    return ::ConvertReLu<hal_1_0::HalPolicy>(operation, model, data);
 }
 
 bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_0::HalPolicy::ConvertReLu1()");
-
-    armnn::ActivationDescriptor desc;
-    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
-    desc.m_A        = 1.0f;
-    desc.m_B        = -1.0f;
-
-    return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
+    return ::ConvertReLu1<hal_1_0::HalPolicy>(operation, model, data);
 }
 
 bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_0::HalPolicy::ConvertReLu6()");
-
-    armnn::ActivationDescriptor desc;
-    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
-    desc.m_A        = 6.0f;
-
-    return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
+    return ::ConvertReLu6<hal_1_0::HalPolicy>(operation, model, data);
 }
 
 bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
@@ -1285,13 +1270,7 @@
 bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_0::HalPolicy::ConvertTanH()");
-
-    armnn::ActivationDescriptor desc;
-    desc.m_Function = armnn::ActivationFunction::TanH;
-    desc.m_A = 1.0f; // android nn does not support tanH parameters
-    desc.m_B = 1.0f; // set to 1.0f for unity scaling
-
-    return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
+    return ::ConvertTanH<hal_1_0::HalPolicy>(operation, model, data);
 }
 
 bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
diff --git a/1.0/HalPolicy.hpp b/1.0/HalPolicy.hpp
index 833017b..4b8dc47 100644
--- a/1.0/HalPolicy.hpp
+++ b/1.0/HalPolicy.hpp
@@ -70,14 +70,14 @@
 
     static bool ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data);
 
-    static bool ConvertTanH(const Operation& operation, const Model& model, ConversionData& data);
-
     static bool ConvertReshape(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertTanH(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ValidateConv2dParameters(const Operation& operation);
 
     static bool ValidateDepthwiseConv2dParameters(const Operation& operation);
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 2d6d797..5f327c2 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -42,14 +42,10 @@
         case V1_0::OperationType::LSTM:
         case V1_0::OperationType::MAX_POOL_2D:
         case V1_0::OperationType::MUL:
-        case V1_0::OperationType::RELU:
-        case V1_0::OperationType::RELU1:
-        case V1_0::OperationType::RELU6:
         case V1_0::OperationType::RESHAPE:
         case V1_0::OperationType::RNN:
         case V1_0::OperationType::SPACE_TO_DEPTH:
         case V1_0::OperationType::SVDF:
-        case V1_0::OperationType::TANH:
         case V1_0::OperationType::OEM_OPERATION:
             return true;
         default:
@@ -151,12 +147,20 @@
             return ConvertPadV2(operation, model, data);
         case V1_2::OperationType::PRELU:
             return ConvertPrelu(operation, model, data);
+        case V1_2::OperationType::RELU:
+            return ConvertReLu(operation, model, data);
+        case V1_2::OperationType::RELU1:
+            return ConvertReLu1(operation, model, data);
+        case V1_2::OperationType::RELU6:
+            return ConvertReLu6(operation, model, data);
         case V1_2::OperationType::RESIZE_BILINEAR:
             return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear);
         case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
             return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
         case V1_2::OperationType::SOFTMAX:
             return ConvertSoftmax(operation, model, data);
+        case V1_2::OperationType::TANH:
+            return ConvertTanH(operation, model, data);
         default:
             return Fail("%s: Operation type %s not supported in ArmnnDriver",
                         __func__, toString(operation.type).c_str());
@@ -779,6 +783,24 @@
                                                             armnn::Optional<armnn::TensorInfo>(outputInfo));
 }
 
+bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertReLu()");
+    return ::ConvertReLu<hal_1_2::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertReLu1()");
+    return ::ConvertReLu1<hal_1_2::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertReLu6()");
+    return ::ConvertReLu6<hal_1_2::HalPolicy>(operation, model, data);
+}
+
 bool HalPolicy::ConvertResize(const Operation& operation,
                               const Model& model,
                               ConversionData& data,
@@ -1030,5 +1052,11 @@
                                                             armnn::Optional<armnn::TensorInfo>(outputInfo));
 }
 
+bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertTanH()");
+    return ::ConvertTanH<hal_1_2::HalPolicy>(operation, model, data);
+}
+
 } // namespace hal_1_2
 } // namespace armnn_driver
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index bac765d..d11ae3c 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -45,6 +45,12 @@
 
     static bool ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertReLu(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertResize(const Operation& operation,
                               const Model& model,
                               ConversionData& data,
@@ -53,6 +59,8 @@
     static bool ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertTanH(const Operation& operation, const Model& model, ConversionData& data);
 };
 
 } // namespace hal_1_2
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index f84dc10..790382d 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -194,6 +194,11 @@
     return false;
 }
 
+inline bool Is12Operand(V1_0::Operand)
+{
+    return false;
+}
+
 #ifdef ARMNN_ANDROID_NN_V1_2
 
 inline bool IsBool(V1_2::Operand operand)
@@ -201,6 +206,12 @@
     return operand.type == V1_2::OperandType::BOOL;
 }
 
+/// Checks if an operand is a V1_2 Operand
+inline bool Is12Operand(V1_2::Operand)
+{
+    return true;
+}
+
 #endif
 
 template<typename LayerHandleType>
@@ -1161,8 +1172,15 @@
     armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
     if (IsDynamicTensor(outInfo))
     {
-        ALOGD("Output shape not set, will infer from input");
-        outInfo.SetShape(input.GetTensorInfo().GetShape());
+        if (Is12Operand(*outputOperand))
+        {
+            ALOGD("Output shape not set, will infer from input");
+            outInfo.SetShape(input.GetTensorInfo().GetShape());
+        }
+        else
+        {
+            return Fail("%s: Dynamic OutputShapes are not supported in this HAL version", __func__);
+        }
     }
 
     bool isSupported = false;
@@ -1190,6 +1208,55 @@
 }
 
 template<typename HalPolicy,
+    typename HalOperation = typename HalPolicy::Operation,
+    typename HalModel     = typename HalPolicy::Model>
+bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
+{
+    armnn::ActivationDescriptor desc;
+    desc.m_Function = armnn::ActivationFunction::ReLu;
+
+    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
+}
+
+template<typename HalPolicy,
+    typename HalOperation = typename HalPolicy::Operation,
+    typename HalModel     = typename HalPolicy::Model>
+bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
+{
+    armnn::ActivationDescriptor desc;
+    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
+    desc.m_A        = 1.0f;
+    desc.m_B        = -1.0f;
+
+    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
+}
+
+template<typename HalPolicy,
+    typename HalOperation = typename HalPolicy::Operation,
+    typename HalModel     = typename HalPolicy::Model>
+bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
+{
+    armnn::ActivationDescriptor desc;
+    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
+    desc.m_A        = 6.0f;
+
+    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
+}
+
+template<typename HalPolicy,
+    typename HalOperation = typename HalPolicy::Operation,
+    typename HalModel     = typename HalPolicy::Model>
+bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
+{
+    armnn::ActivationDescriptor desc;
+    desc.m_Function = armnn::ActivationFunction::TanH;
+    desc.m_A = 1.0f; // Android NN does not support TanH parameters
+    desc.m_B = 1.0f; // set to 1.0f for unity scaling
+
+    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
+}
+
+template<typename HalPolicy,
          typename HalOperation   = typename HalPolicy::Operation,
          typename HalModel       = typename HalPolicy::Model>
 bool ConvertPaddings(const HalOperation& operation,
@@ -1420,17 +1487,7 @@
 
     if (IsDynamicTensor(outputInfo))
     {
-        try
-        {
-            ALOGD("Output shape not set, will infer from inputs");
-            outputInfo.SetShape(InferConvolution2dOutputShape(inputInfo.GetShape(),
-                                                              weights.GetInfo().GetShape(),
-                                                              desc));
-        }
-        catch (armnn::Exception& e)
-        {
-            return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
-        }
+        return Fail("%s: Dynamic OutputShapes are not supported", __func__);
     }
 
     bool isSupported = false;
@@ -1600,17 +1657,7 @@
 
     if (IsDynamicTensor(outputInfo))
     {
-        try
-        {
-            ALOGD("Output shape not set, will infer from inputs");
-            outputInfo.SetShape(InferDepthwiseConvolution2dOutputShape(inputInfo.GetShape(),
-                                                                       weights.GetInfo().GetShape(),
-                                                                       desc));
-        }
-        catch (armnn::Exception& e)
-        {
-            return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
-        }
+        return Fail("%s: Dynamic OutputShapes are not supported", __func__);
     }
 
     bool isSupported = false;