Add QUANT8_ASYMM_SIGNED in PadV2 for HAL1.3

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I597344d91975d7067f137e6587b751500de33837
diff --git a/ConversionUtils_1_2.hpp b/ConversionUtils_1_2.hpp
index f884f7c..c66a2f5 100644
--- a/ConversionUtils_1_2.hpp
+++ b/ConversionUtils_1_2.hpp
@@ -1573,7 +1573,7 @@
             return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
         }
     }
-    else if (operandType0 == HalOperandType::TENSOR_QUANT8_ASYMM && operandType2 == HalOperandType::INT32)
+    else if (isQuantizedOperand(operandType0) && operandType2 == HalOperandType::INT32)
     {
         int32_t intPadValue = 0;
         if (!GetInputInt32<HalPolicy>(operation, 2, intPadValue, model, data))
diff --git a/Utils.cpp b/Utils.cpp
index 873dce4..60e7a80 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -603,6 +603,53 @@
 #endif
 }
 
+bool isQuantizedOperand(const V1_0::OperandType& operandType)
+{
+    if (operandType == V1_0::OperandType::TENSOR_QUANT8_ASYMM)
+    {
+        return true;
+    }
+    else
+    {
+        return false;
+    }
+}
+
+#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2
+bool isQuantizedOperand(const V1_2::OperandType& operandType)
+{
+    if (operandType == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
+        operandType == V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
+        operandType == V1_2::OperandType::TENSOR_QUANT8_SYMM ||
+        operandType == V1_2::OperandType::TENSOR_QUANT16_SYMM)
+    {
+        return true;
+    }
+    else
+    {
+        return false;
+    }
+}
+#endif
+
+#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
+bool isQuantizedOperand(const V1_3::OperandType& operandType)
+{
+    if (operandType == V1_3::OperandType::TENSOR_QUANT8_ASYMM ||
+        operandType == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
+        operandType == V1_3::OperandType::TENSOR_QUANT8_SYMM ||
+        operandType == V1_3::OperandType::TENSOR_QUANT16_SYMM ||
+        operandType == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
+    {
+        return true;
+    }
+    else
+    {
+        return false;
+    }
+}
+#endif
+
 std::string GetFileTimestamp()
 {
     // used to get a timestamp to name diagnostic files (the ArmNN serialized graph
diff --git a/Utils.hpp b/Utils.hpp
index 893c4a0..da10153 100644
--- a/Utils.hpp
+++ b/Utils.hpp
@@ -70,22 +70,25 @@
 /// Can throw UnsupportedOperand
 armnn::TensorInfo GetTensorInfoForOperand(const V1_0::Operand& operand);
 
+std::string GetOperandSummary(const V1_0::Operand& operand);
+
+// Returns true for any quantized data type, false for the rest.
+bool isQuantizedOperand(const V1_0::OperandType& operandType);
+
 #if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2
 armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand);
+
+std::string GetOperandSummary(const V1_2::Operand& operand);
+
+bool isQuantizedOperand(const V1_2::OperandType& operandType);
 #endif
 
 #ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
 armnn::TensorInfo GetTensorInfoForOperand(const V1_3::Operand& operand);
-#endif
 
-std::string GetOperandSummary(const V1_0::Operand& operand);
-
-#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2
-std::string GetOperandSummary(const V1_2::Operand& operand);
-#endif
-
-#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
 std::string GetOperandSummary(const V1_3::Operand& operand);
+
+bool isQuantizedOperand(const V1_3::OperandType& operandType);
 #endif
 
 template <typename HalModel>