IVGCVSW-3521 CpuAcc V1.2 Pad failures

 * Fixed Pad and PadV2 test failures and skips.
 * Moved ConvertPad into ConversionUtils.hpp and templated it on HalPolicy so
   that the Float16 tests can run (see the sketch below).
 * Pad on QUANT8_ASYMM inputs now uses the QuantizationOffset as the pad
   value, and PadV2 uses the INT32 pad value directly.
 * Added Float16 handling to the tensor swizzling and operand type conversion
   in Utils.cpp.
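
A minimal, self-contained sketch of the dispatch pattern (the toy types below
are illustrative only, not the driver's real classes): the template defaults
pull the HAL types out of the policy class, so each call site only has to
name the policy.

    namespace v1_1 { struct Operation {}; struct Model {}; }
    namespace v1_2 { struct Operation {}; struct Model {}; }

    struct Policy11 { using Operation = v1_1::Operation; using Model = v1_1::Model; };
    struct Policy12 { using Operation = v1_2::Operation; using Model = v1_2::Model; };

    template<typename HalPolicy,
             typename HalOperation = typename HalPolicy::Operation,
             typename HalModel     = typename HalPolicy::Model>
    bool ConvertPadSketch(const HalOperation&, const HalModel&)
    {
        return true; // a single body serves every HAL version
    }

    // Call sites then mirror the real ones in HalPolicy.cpp:
    //   ConvertPadSketch<Policy11>(v1_1::Operation{}, v1_1::Model{});
    //   ConvertPadSketch<Policy12>(v1_2::Operation{}, v1_2::Model{});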

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I50ded84fe44ea5d5949e877f383f32adff88680d
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index c5df72a..b58cda4 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -86,7 +86,7 @@
             case V1_1::OperationType::MEAN:
                 return ConvertMean(operation, model, data);
             case V1_1::OperationType::PAD:
-                return ConvertPad(operation, model, data);
+                return ConvertPad<hal_1_1::HalPolicy>(operation, model, data);
             case V1_1::OperationType::SPACE_TO_BATCH_ND:
                 return ConvertSpaceToBatchNd(operation, model, data);
             case V1_1::OperationType::SQUEEZE:
@@ -296,64 +296,6 @@
     return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
 }
 
-bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
-{
-    ALOGV("hal_1_1::HalPolicy::ConvertPad()");
-
-    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
-    if (!input.IsValid())
-    {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
-    unsigned int rank = inputInfo.GetNumDimensions();
-
-    armnn::PadDescriptor descriptor;
-    if (!ConvertPaddings<hal_1_1::HalPolicy>(operation, model, data, rank, descriptor))
-    {
-        return Fail("%s: Could not convert paddings", __func__);
-    }
-
-    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output", __func__);
-    }
-
-    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
-    if (IsDynamicTensor(outputInfo))
-    {
-        ALOGD("Output shape not set, will infer from inputs");
-        outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList));
-    }
-
-    bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsPadSupported,
-                               data.m_Backends,
-                               isSupported,
-                               inputInfo,
-                               outputInfo,
-                               descriptor);
-    if (!isSupported)
-    {
-        return false;
-    }
-
-    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
-    assert(layer != nullptr);
-    input.Connect(layer->GetInputSlot(0));
-    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
-
-    return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation,
-                                                            0,
-                                                            *layer,
-                                                            model,
-                                                            data,
-                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
-}
-
 bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_1::HalPolicy::ConvertSpaceToBatchNd()");
diff --git a/1.1/HalPolicy.hpp b/1.1/HalPolicy.hpp
index dd8558b..827fddd 100644
--- a/1.1/HalPolicy.hpp
+++ b/1.1/HalPolicy.hpp
@@ -31,7 +31,6 @@
     static bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data);
     static bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data);
     static bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data);
-    static bool ConvertPad(const Operation& operation, const Model& model, ConversionData& data);
     static bool ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data);
     static bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data);
     static bool ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 906d6bc..307475a 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -68,7 +68,6 @@
         case V1_1::OperationType::BATCH_TO_SPACE_ND:
         case V1_1::OperationType::DIV:
         case V1_1::OperationType::MEAN:
-        case V1_1::OperationType::PAD:
         case V1_1::OperationType::SPACE_TO_BATCH_ND:
         case V1_1::OperationType::SQUEEZE:
         case V1_1::OperationType::STRIDED_SLICE:
@@ -146,6 +145,8 @@
             return ConvertMaximum(operation, model, data);
         case V1_2::OperationType::MINIMUM:
             return ConvertMinimum(operation, model, data);
+        case V1_2::OperationType::PAD:
+            return ConvertPad<hal_1_2::HalPolicy>(operation, model, data);
         case V1_2::OperationType::PAD_V2:
             return ConvertPadV2(operation, model, data);
         case V1_2::OperationType::PRELU:
@@ -675,15 +676,12 @@
     }
     else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
     {
-        int32_t quantizedPadValue = 0;
-        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, quantizedPadValue, model, data))
+        int32_t intPadValue = 0;
+        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, intPadValue, model, data))
         {
             return Fail("%s: Could not read input 2 (INT32)", __func__);
         }
-
-        descriptor.m_PadValue = armnn::Dequantize(quantizedPadValue,
-                                                  inputInfo.GetQuantizationScale(),
-                                                  inputInfo.GetQuantizationOffset());
+        descriptor.m_PadValue = intPadValue;
     }
     else
     {
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 5ebec6b..fa686a6 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -1651,4 +1651,74 @@
                                                    armnn::Optional<armnn::TensorInfo>(outputInfo));
 }
 
+template<typename HalPolicy,
+         typename HalOperation = typename HalPolicy::Operation,
+         typename HalOperand   = typename HalPolicy::Operand,
+         typename HalModel     = typename HalPolicy::Model>
+bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
+{
+    ALOGV("ConvertPad()");
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+
+    armnn::PadDescriptor descriptor;
+    if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
+    {
+        return Fail("%s: Could not convert paddings", __func__);
+    }
+
+    // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
+    // value must be "logical zero", so we set it equal to the QuantizationOffset; the value then dequantizes to
+    // (QuantizationOffset - QuantizationOffset) * scale = 0.
+    if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8)
+    {
+        descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
+    }
+
+    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output", __func__);
+    }
+
+    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        ALOGD("Output shape not set, will infer from inputs");
+        outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList));
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsPadSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               descriptor);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
+                                                   0,
+                                                   *layer,
+                                                   model,
+                                                   data,
+                                                   armnn::Optional<armnn::TensorInfo>(outputInfo));
+}
+
 } // namespace armnn_driver
diff --git a/Utils.cpp b/Utils.cpp
index d3d62a0..43b65ee 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -7,6 +7,7 @@
 
 #include "Utils.hpp"
 
+#include <Half.hpp>
 #include <Permute.hpp>
 
 #include <cassert>
@@ -42,6 +43,9 @@
 
     switch(tensor.GetDataType())
     {
+    case armnn::DataType::Float16:
+        SwizzleAndroidNn4dTensorToArmNn<armnn::Half>(tensor.GetShape(), input, output, mappings);
+        break;
     case armnn::DataType::Float32:
         SwizzleAndroidNn4dTensorToArmNn<float>(tensor.GetShape(), input, output, mappings);
         break;
@@ -112,6 +116,9 @@
         case V1_2::OperandType::TENSOR_FLOAT32:
             type = armnn::DataType::Float32;
             break;
+        case V1_2::OperandType::TENSOR_FLOAT16:
+            type = armnn::DataType::Float16;
+            break;
         case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
             type = armnn::DataType::QuantisedAsymm8;
             break;