IVGCVSW-3591 Fixed unexpectedly skipped SUB tests

 * A model that has inputs with different quantization scales is not compliant with 1.1
 * Moved ConvertSub into ConversionUtils.hpp so it can be shared by the 1.1 and 1.2 HalPolicies, and routed V1_2 SUB through it
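
For context, a minimal self-contained sketch (illustrative only, not part of
this patch) of why such a model falls outside 1.1: NNAPI 1.1 defines SUB for
TENSOR_FLOAT32 inputs only, so a quantized SUB (and with it, inputs whose
quantization scales differ) can only be expressed from NNAPI 1.2 onwards.
The enum, struct, and function names below are assumptions made up for this
sketch, not HAL types:

    #include <cstdio>

    enum class OperandType { TENSOR_FLOAT32, TENSOR_QUANT8_ASYMM };

    struct Operand
    {
        OperandType type;
        float       scale; // only meaningful for quantized operands
    };

    // NNAPI 1.1 restricts SUB to float tensors; quantized SUB arrives in 1.2
    bool SubIsExpressibleInV1_1(const Operand& in0, const Operand& in1)
    {
        return in0.type == OperandType::TENSOR_FLOAT32 &&
               in1.type == OperandType::TENSOR_FLOAT32;
    }

    int main()
    {
        Operand a{OperandType::TENSOR_QUANT8_ASYMM, 0.5f};
        Operand b{OperandType::TENSOR_QUANT8_ASYMM, 0.25f}; // different scale
        std::printf("SUB expressible in 1.1? %s\n",
                    SubIsExpressibleInV1_1(a, b) ? "yes" : "no"); // prints "no"
    }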

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Ifb8277d78f05b5ef017effa879322a08c0efc851
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index ab8224a..93ee70e 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -166,61 +166,7 @@
 bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_1::HalPolicy::ConvertSub()");
-
-    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
-    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 1, model, data);
-
-    if (!input0.IsValid() || !input1.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    // The FuseActivation parameter is always the input index 2
-    // and it should be optional
-    ActivationFn activationFunction;
-    if (!GetOptionalInputActivation<hal_1_1::HalPolicy>(operation, 2, activationFunction, model, data))
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output 0", __func__);
-    }
-
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-    if (IsDynamicTensor(outputInfo))
-    {
-        return Fail("%s: Dynamic output tensors are not supported", __func__);
-    }
-
-    bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsSubtractionSupported,
-                               data.m_Backends,
-                               isSupported,
-                               input0.GetTensorInfo(),
-                               input1.GetTensorInfo(),
-                               outputInfo);
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
-    armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
-
-    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
-    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
-
-    if (endLayer)
-    {
-        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
-        return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *endLayer, model, data);
-    }
-
-    return Fail("%s: ProcessActivation failed", __func__);
+    return ::ConvertSub<hal_1_1::HalPolicy>(operation, model, data);
 }
 
 bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 8dbfd89..8502640 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -61,7 +61,6 @@
         case V1_1::OperationType::SPACE_TO_BATCH_ND:
         case V1_1::OperationType::SQUEEZE:
         case V1_1::OperationType::STRIDED_SLICE:
-        case V1_1::OperationType::SUB:
         case V1_1::OperationType::TRANSPOSE:
             return true;
         default:
@@ -161,6 +160,8 @@
             return ConvertSoftmax(operation, model, data);
         case V1_2::OperationType::SPACE_TO_DEPTH:
             return ConvertSpaceToDepth(operation, model, data);
+        case V1_2::OperationType::SUB:
+            return ConvertSub(operation, model, data);
         case V1_2::OperationType::TANH:
             return ConvertTanH(operation, model, data);
         case V1_2::OperationType::LSTM:
@@ -1003,6 +1004,12 @@
     return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
 }
 
+bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertSub()");
+    return ::ConvertSub<hal_1_2::HalPolicy>(operation, model, data);
+}
+
 bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_2::HalPolicy::ConvertTanH()");
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index 7468313..285a37f 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -66,6 +66,8 @@
 
     static bool ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertTanH(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertLstm(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 52bfd5c..fc6d365 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -1738,4 +1738,66 @@
     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
 }
 
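+// Shared implementation of the SUB conversion, used by the per-HAL-version HalPolicy classes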
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Operand   = typename HalPolicy::Operand,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
+{
+    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
+
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // The FuseActivation parameter is always at input index 2,
+    // and it is optional
+    ActivationFn activationFunction;
+    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
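+    // Check with the backends that the subtraction of these tensors is supported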
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsSubtractionSupported,
+                               data.m_Backends,
+                               isSupported,
+                               input0.GetTensorInfo(),
+                               input1.GetTensorInfo(),
+                               outputInfo);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
+    armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
+
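+    // ProcessActivation returns nullptr if the fused activation is not supported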
+    if (endLayer)
+    {
+        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
+        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
+    }
+
+    return Fail("%s: ProcessActivation failed", __func__);
+}
+
 } // namespace armnn_driver