IVGCVSW-5595 Fix incorrect padding value for asymmetric quantized type

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I85f0c30757043f8c27c78d607f0f9dbbdd35b9fb
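
This change adds PadQAsymmTestCommon, a 2D pad test for QAsymmU8/QAsymmS8 tensors that uses a non-zero quantization offset and an optional custom padding value, plus QAsymmS8 wrappers (PadInt82dAsymmTest and PadInt82dCustomPaddingAsymmTest). For reference, the following is a minimal standalone sketch of the expected-output construction the new test encodes: a 3x3 input padded to 7x7 with a pad list of {{2,2},{2,2}}, with the pad value interpreted directly in the quantized domain as in the test's static_cast<T>. The helper name BuildExpectedPadOutput and the standalone main are illustrative only and are not part of this change.

#include <cstdint>
#include <iostream>
#include <vector>

// Build the expected output of a symmetric 2D pad: 'pad' elements of
// 'padValue' are added before and after each dimension, matching the
// test's pad list of {{2,2},{2,2}}.
template <typename T>
std::vector<T> BuildExpectedPadOutput(const std::vector<T>& input,
                                      unsigned int inputH, unsigned int inputW,
                                      unsigned int pad, T padValue)
{
    const unsigned int outputH = inputH + 2 * pad;
    const unsigned int outputW = inputW + 2 * pad;
    std::vector<T> output(outputH * outputW, padValue);
    for (unsigned int y = 0; y < inputH; ++y)
    {
        for (unsigned int x = 0; x < inputW; ++x)
        {
            output[(y + pad) * outputW + (x + pad)] = input[y * inputW + x];
        }
    }
    return output;
}

int main()
{
    // Same 3x3 QAsymmS8 input values as the test; customPaddingValue = 1.0f
    // is cast into the quantized domain, mirroring static_cast<T> in the test.
    const std::vector<int8_t> input = { 4, 8, 6,
                                        7, 4, 4,
                                        3, 2, 4 };
    const int8_t padValue = static_cast<int8_t>(1.0f);

    const auto expected = BuildExpectedPadOutput(input, 3u, 3u, 2u, padValue);
    for (unsigned int y = 0; y < 7; ++y)
    {
        for (unsigned int x = 0; x < 7; ++x)
        {
            std::cout << static_cast<int>(expected[y * 7 + x]) << ' ';
        }
        std::cout << '\n';
    }
    return 0;
}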
diff --git a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
index 5511f86..086f875 100644
--- a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
@@ -417,6 +417,79 @@
     return result;
 }
 
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> PadQAsymmTestCommon(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
+    float qScale,
+    int32_t qOffset,
+    const float customPaddingValue)
+{
+    IgnoreUnused(memoryManager);
+    const armnn::TensorShape inputShape{ 3, 3 };
+    const armnn::TensorShape outputShape{ 7, 7 };
+
+    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
+
+    std::vector<T> inputValues =
+        {
+            // Height (3) x Width (3)
+            4, 8, 6,
+            7, 4, 4,
+            3, 2, 4
+        };
+
+    T p = static_cast<T>(customPaddingValue);
+    std::vector<T> expectedOutputValues =
+        {
+            p, p, p, p, p, p, p,
+            p, p, p, p, p, p, p,
+            p, p, 4, 8, 6, p, p,
+            p, p, 7, 4, 4, p, p,
+            p, p, 3, 2, 4, p, p,
+            p, p, p, p, p, p, p,
+            p, p, p, p, p, p, p
+        };
+
+    auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
+
+    LayerTestResult<T, 2> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+
+    armnn::PadQueueDescriptor descriptor;
+
+    std::vector<std::pair<unsigned int, unsigned int>> padList;
+    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+
+    descriptor.m_Parameters.m_PadList = padList;
+    descriptor.m_Parameters.m_PadValue = customPaddingValue;
+    armnn::WorkloadInfo info;
+
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
+
+    return result;
+}
+
 //
 // Explicit template specializations
 //
@@ -446,6 +519,24 @@
     float qScale,
     int32_t qOffset);
 
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 2>
+PadQAsymmTestCommon<armnn::DataType::QAsymmS8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
+    float qScale,
+    int32_t qOffset,
+    const float customPaddingValue);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
+PadQAsymmTestCommon<armnn::DataType::QAsymmU8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
+    float qScale,
+    int32_t qOffset,
+    const float customPaddingValue);
+
 //
 // Implementation functions
 //
@@ -582,3 +673,21 @@
 {
     return Pad4dTestCommon<armnn::DataType::QSymmS8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
 }
+
+LayerTestResult<int8_t, 2> PadInt82dAsymmTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadQAsymmTestCommon<armnn::DataType::QAsymmS8>(
+        workloadFactory, memoryManager, tensorHandleFactory, 2.0f, 2);
+}
+
+LayerTestResult<int8_t, 2> PadInt82dCustomPaddingAsymmTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadQAsymmTestCommon<armnn::DataType::QAsymmS8>(
+        workloadFactory, memoryManager, tensorHandleFactory, 2.0f, 3, 1.0f);
+}
diff --git a/src/backends/backendsCommon/test/layerTests/PadTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PadTestImpl.hpp
index 8d60386..4c30c42 100644
--- a/src/backends/backendsCommon/test/layerTests/PadTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/PadTestImpl.hpp
@@ -39,6 +39,15 @@
     float qScale,
     int32_t qOffset);
 
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> PadQAsymmTestCommon(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
+    float qScale,
+    int32_t qOffset,
+    const float customPaddingValue = 0.0f);
+
 LayerTestResult<uint8_t, 2> PadUint82dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -117,4 +126,14 @@
 LayerTestResult<int8_t, 4> PadInt84dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int8_t, 2> PadInt82dAsymmTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int8_t, 2> PadInt82dCustomPaddingAsymmTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::ITensorHandleFactory& tensorHandleFactory);
\ No newline at end of file