IVGCVSW-3235 Add scalar to use as padding value in Reference Pad

Signed-off-by: David Monahan <david.monahan@arm.com>
Change-Id: If050f318fcb7626bbfae1b8737a1d232a4a5a915
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index d9ae546..c9a5731 100644
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -5869,13 +5869,14 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
-    int32_t qOffset)
+    int32_t qOffset,
+    const float customPaddingValue = 0)
 {
     const armnn::TensorShape inputShape{ 3, 3 };
     const armnn::TensorShape outputShape{ 7, 7 };
 
-    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
-    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
+    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
 
     std::vector<T> inputValues(
     QuantizedVector<T>(qScale, qOffset,
@@ -5886,17 +5887,38 @@
       3, 2, 4
     }));
 
-    std::vector<T> expectedOutputValues(
-    QuantizedVector<T>(qScale, qOffset,
+    const T padValue = ConvertToDataType<T>(customPaddingValue, inputTensorInfo);
+
+    std::vector<T> expectedOutputValues;
+    if (padValue == 0)
     {
-      0, 0, 0, 0, 0, 0, 0,
-      0, 0, 0, 0, 0, 0, 0,
-      0, 0, 4, 8, 6, 0, 0,
-      0, 0, 7, 4, 4, 0, 0,
-      0, 0, 3, 2, 4, 0, 0,
-      0, 0, 0, 0, 0, 0, 0,
-      0, 0, 0, 0, 0, 0, 0
-    }));
+        expectedOutputValues = (
+        QuantizedVector<T>(qScale, qOffset,
+        {
+          0, 0, 0, 0, 0, 0, 0,
+          0, 0, 0, 0, 0, 0, 0,
+          0, 0, 4, 8, 6, 0, 0,
+          0, 0, 7, 4, 4, 0, 0,
+          0, 0, 3, 2, 4, 0, 0,
+          0, 0, 0, 0, 0, 0, 0,
+          0, 0, 0, 0, 0, 0, 0
+        }));
+    }
+    else
+    {
+        expectedOutputValues = (
+        QuantizedVector<T>(qScale, qOffset,
+        {
+          1, 1, 1, 1, 1, 1, 1,
+          1, 1, 1, 1, 1, 1, 1,
+          1, 1, 4, 8, 6, 1, 1,
+          1, 1, 7, 4, 4, 1, 1,
+          1, 1, 3, 2, 4, 1, 1,
+          1, 1, 1, 1, 1, 1, 1,
+          1, 1, 1, 1, 1, 1, 1
+        }));
+    }
+
 
     auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
 
@@ -5943,8 +5965,8 @@
     const armnn::TensorShape inputShape{ 2, 2, 2 };
     const armnn::TensorShape outputShape{ 3, 5, 6 };
 
-    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
-    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
+    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
 
     std::vector<T> inputValues(
       QuantizedVector<T>(qScale,qOffset,
@@ -6028,8 +6050,8 @@
     const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
     const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
 
-    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
-    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
+    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
 
     std::vector<T> inputValues(
       QuantizedVector<T>(qScale,qOffset,
@@ -6263,6 +6285,13 @@
   return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
 }
 
+LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0, 1.0f);
+}
+
 LayerTestResult<uint8_t, 3> PadUint83dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
@@ -6284,6 +6313,13 @@
   return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
+LayerTestResult<float, 2> PadFloat322dCustomPaddingTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, 1.0f);
+}
+
 LayerTestResult<float, 3> PadFloat323dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 25ccfa0..be16819 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -1382,6 +1382,10 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
+LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
 LayerTestResult<uint8_t, 3> PadUint83dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -1394,6 +1398,10 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
+LayerTestResult<float, 2> PadFloat322dCustomPaddingTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
 LayerTestResult<float, 3> PadFloat323dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -2022,6 +2030,18 @@
     return output;
 }
 
+// Utility method to convert a single value to the correct type
+template <typename T>
+T ConvertToDataType(const float& value,
+                    const armnn::TensorInfo& tensorInfo)
+{
+    std::vector<T> output(1);
+    std::unique_ptr<armnn::Encoder<float>> pEncoder = armnn::MakeEncoder<float>(tensorInfo, output.data());
+    armnn::Encoder<float>& rEncoder = *pEncoder;
+    rEncoder.Set(value);
+    return output[0];
+}
+
 template<armnn::DataType ArmnnType, typename T>
 LayerTestResult<T, 2> Rsqrt2dTestCommon(
         armnn::IWorkloadFactory& workloadFactory,
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index cb9ee4b..9cb8d13 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -645,10 +645,12 @@
 
 // Pad
 ARMNN_AUTO_TEST_CASE(PadFloat322d, PadFloat322dTest)
+ARMNN_AUTO_TEST_CASE(PadFloat322dCustomPadding, PadFloat322dCustomPaddingTest)
 ARMNN_AUTO_TEST_CASE(PadFloat323d, PadFloat323dTest)
 ARMNN_AUTO_TEST_CASE(PadFloat324d, PadFloat324dTest)
 
 ARMNN_AUTO_TEST_CASE(PadUint82d, PadUint82dTest)
+ARMNN_AUTO_TEST_CASE(PadUint82dCustomPadding, PadUint82dCustomPaddingTest)
 ARMNN_AUTO_TEST_CASE(PadUint83d, PadUint83dTest)
 ARMNN_AUTO_TEST_CASE(PadUint84d, PadUint84dTest)
 
diff --git a/src/backends/reference/workloads/Pad.cpp b/src/backends/reference/workloads/Pad.cpp
index 7a928a1..1e58124 100644
--- a/src/backends/reference/workloads/Pad.cpp
+++ b/src/backends/reference/workloads/Pad.cpp
@@ -5,8 +5,10 @@
 
 #include "Pad.hpp"
 #include "backendsCommon/WorkloadData.hpp"
-#include <boost/numeric/conversion/cast.hpp>
 #include "TensorBufferArrayView.hpp"
+#include "Encoders.hpp"
+
+#include <boost/numeric/conversion/cast.hpp>
 #include <cmath>
 #include <cstddef>
 #include <functional>
@@ -15,12 +17,25 @@
 
 namespace armnn
 {
+
+template <typename T>
+T ConvertToDataType(const float& value,
+                    const armnn::TensorInfo& tensorInfo)
+{
+    std::vector<T> output(1);
+    std::unique_ptr<armnn::Encoder<float>> pEncoder = armnn::MakeEncoder<float>(tensorInfo, output.data());
+    armnn::Encoder<float>& rEncoder = *pEncoder;
+    rEncoder.Set(value);
+    return output[0];
+}
+
 template <typename T>
 void Pad(const TensorInfo& inputInfo,
          const TensorInfo& outputInfo,
-         std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
+         std::vector<std::pair<unsigned int, unsigned int>> m_padList,
          const T* inputData,
-         T* outData)
+         T* outData,
+         const float padValue)
 {
     unsigned int numOutputElements = outputInfo.GetNumElements();
 
@@ -45,9 +60,11 @@
     unsigned int outputHeight = 0;
     unsigned int outputWidth = 0;
 
+    T convertedPadValue = ConvertToDataType<T>(padValue, inputInfo);
+
     for (unsigned int i = 0; i < numOutputElements; ++i)
     {
-       outData[i] = 0;
+       outData[i] = convertedPadValue;
     }
 
     switch(numInputDimensions) {
@@ -58,7 +75,7 @@
 
             for (unsigned int w = 0; w < inputWidth ; w++)
             {
-                outData[w+std::get<0>(m_PadList[0])] = inputData[w];
+                outData[w+std::get<0>(m_padList[0])] = inputData[w];
             }
 
             break;
@@ -74,8 +91,8 @@
             {
                 for (unsigned int w = 0; w < inputWidth ; w++)
                 {
-                    outData[(h+std::get<0>(m_PadList[0]))*outputWidth
-                    + (w+std::get<0>(m_PadList[1]))] = inputData[h * inputWidth + w];
+                    outData[(h+std::get<0>(m_padList[0]))*outputWidth
+                    + (w+std::get<0>(m_padList[1]))] = inputData[h * inputWidth + w];
                 }
             }
 
@@ -96,9 +113,9 @@
                 {
                     for (unsigned int w = 0; w < inputWidth ; w++)
                     {
-                        outData[(c+std::get<0>(m_PadList[0]))*outputHeight*outputWidth
-                        + (h+std::get<0>(m_PadList[1]))*outputWidth
-                        + (w+std::get<0>(m_PadList[2]))] = inputData[c * inputHeight * inputWidth
+                        outData[(c+std::get<0>(m_padList[0]))*outputHeight*outputWidth
+                        + (h+std::get<0>(m_padList[1]))*outputWidth
+                        + (w+std::get<0>(m_padList[2]))] = inputData[c * inputHeight * inputWidth
                                                                       + h * inputWidth
                                                                       + w];
                     }
@@ -125,10 +142,10 @@
                     {
                         for (unsigned int w = 0; w < inputWidth ; w++)
                         {
-                            outData[(b+std::get<0>(m_PadList[0])) * outputChannels * outputHeight * outputWidth
-                                   + (c+std::get<0>(m_PadList[1])) * outputHeight * outputWidth
-                                   + (h+std::get<0>(m_PadList[2])) * outputWidth
-                                   + (w+std::get<0>(m_PadList[3]))] = inputData[b * inputChannels * inputHeight
+                            outData[(b+std::get<0>(m_padList[0])) * outputChannels * outputHeight * outputWidth
+                                   + (c+std::get<0>(m_padList[1])) * outputHeight * outputWidth
+                                   + (h+std::get<0>(m_padList[2])) * outputWidth
+                                   + (w+std::get<0>(m_padList[3]))] = inputData[b * inputChannels * inputHeight
                                                                                 * inputWidth
                                                                              + c * inputHeight * inputWidth
                                                                              + h * inputWidth
@@ -150,11 +167,13 @@
                          const TensorInfo& outputInfo,
                          std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
                          const float* inputData,
-                         float* outData);
+                         float* outData,
+                         const float padValue);
 template void Pad<uint8_t>(const TensorInfo& inputInfo,
                            const TensorInfo& outputInfo,
                            std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
                            const uint8_t* inputData,
-                           uint8_t* outData);
+                           uint8_t* outData,
+                           const float padValue);
 
 } //namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/Pad.hpp b/src/backends/reference/workloads/Pad.hpp
index 42318d6..4297185 100644
--- a/src/backends/reference/workloads/Pad.hpp
+++ b/src/backends/reference/workloads/Pad.hpp
@@ -15,7 +15,8 @@
 template <typename T>
 void Pad(const TensorInfo& inputInfo,
          const TensorInfo& outputInfo,
-         std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
+         std::vector<std::pair<unsigned int, unsigned int>> m_padList,
          const T* inputData,
-         T* outData);
+         T* outData,
+         const float padValue);
 } //namespace armnn
diff --git a/src/backends/reference/workloads/RefPadWorkload.cpp b/src/backends/reference/workloads/RefPadWorkload.cpp
index 16032e7..8cb9d88 100644
--- a/src/backends/reference/workloads/RefPadWorkload.cpp
+++ b/src/backends/reference/workloads/RefPadWorkload.cpp
@@ -30,8 +30,7 @@
     const T* inputData = GetInputTensorData<T>(0, m_Data);
     T* outputData = GetOutputTensorData<T>(0, m_Data);
 
-
-    Pad(inputInfo, outputInfo, m_Data.m_Parameters.m_PadList, inputData, outputData);
+    Pad(inputInfo, outputInfo, m_Data.m_Parameters.m_PadList, inputData, outputData, m_Data.m_Parameters.m_PadValue);
 }
 
 template class RefPadWorkload<DataType::Float32>;