IVGCVSW-6469 Add MirrorPad FrontEnd and Ref Support

 * Added PaddingMode enum to PadDescriptor to enable Symmetric and
   Reflect padding (usage sketched below).
 * Added Symmetric and Reflect Ref implementation.
 * Added Serializer & Deserializer support.
 * Added unit tests.
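
A minimal usage sketch (not part of this patch) showing how the new
m_PaddingMode field is exercised through the public API; it mirrors the
SerializePadReflect test added in SerializerTests.cpp. The pad list, layer
name and include set are illustrative assumptions, not requirements of this
change.

    #include <armnn/Descriptors.hpp>
    #include <armnn/INetwork.hpp>

    void BuildReflectPadNetwork()
    {
        // Pad list taken from the SerializePadReflect test below.
        armnn::PadDescriptor desc({{0, 0}, {1, 0}, {1, 1}, {1, 2}});
        desc.m_PaddingMode = armnn::PaddingMode::Reflect; // default remains Constant

        armnn::INetworkPtr network = armnn::INetwork::Create();
        armnn::IConnectableLayer* input  = network->AddInputLayer(0);
        armnn::IConnectableLayer* pad    = network->AddPadLayer(desc, "padReflect");
        armnn::IConnectableLayer* output = network->AddOutputLayer(0);

        // Connect input -> pad -> output; tensor infos would be set as usual.
        input->GetOutputSlot(0).Connect(pad->GetInputSlot(0));
        pad->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    }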

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I4bed907b31742b32ccefe5e8ca39a6f1e5bd9dee
diff --git a/src/armnn/SerializeLayerParameters.cpp b/src/armnn/SerializeLayerParameters.cpp
index 3fc93df..c60d4fa 100644
--- a/src/armnn/SerializeLayerParameters.cpp
+++ b/src/armnn/SerializeLayerParameters.cpp
@@ -293,6 +293,7 @@
         fn("PadList", ss.str());
     }
     fn("PadValue", std::to_string(desc.m_PadValue));
+    fn("PaddingMode", GetPaddingModeAsCString(desc.m_PaddingMode));
 }
 
 void StringifyLayerParameters<PreCompiledDescriptor>::Serialize(ParameterStringifyFunction& fn,
diff --git a/src/armnn/layers/PadLayer.cpp b/src/armnn/layers/PadLayer.cpp
index 78af9d3..bbe92af 100644
--- a/src/armnn/layers/PadLayer.cpp
+++ b/src/armnn/layers/PadLayer.cpp
@@ -23,6 +23,7 @@
 {
     PadQueueDescriptor descriptor;
     descriptor.m_Parameters.m_PadList = m_Param.m_PadList;
+    descriptor.m_Parameters.m_PaddingMode = m_Param.m_PaddingMode;
     SetAdditionalInfo(descriptor);
 
     return factory.CreatePad(descriptor, PrepInfoAndDesc(descriptor));
@@ -33,6 +34,7 @@
     auto layer = CloneBase<PadLayer>(graph, m_Param, GetName());
 
     layer->m_Param.m_PadList = m_Param.m_PadList;
+    layer->m_Param.m_PaddingMode = m_Param.m_PaddingMode;
 
     return std::move(layer);
 }
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index c088ef7..bfd4f6b 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -577,6 +577,19 @@
     }
 }
 
+armnn::PaddingMode ToPaddingMode(armnnSerializer::PaddingMode paddingMode)
+{
+    switch (paddingMode)
+    {
+        case armnnSerializer::PaddingMode::PaddingMode_Reflect:
+            return armnn::PaddingMode::Reflect;
+        case armnnSerializer::PaddingMode::PaddingMode_Symmetric:
+            return armnn::PaddingMode::Symmetric;
+        default:
+            return armnn::PaddingMode::Constant;
+    }
+}
+
 armnn::ResizeMethod ToResizeMethod(armnnSerializer::ResizeMethod method)
 {
     switch (method)
@@ -2064,6 +2077,7 @@
 
     auto flatBufferDescriptor = graph->layers()->Get(layerIndex)->layer_as_PadLayer()->descriptor();
     auto flatBufferPadList = flatBufferDescriptor->padList();
+    auto paddingMode = flatBufferDescriptor->paddingMode();
     float padValue = flatBufferDescriptor->padValue();
 
     if (flatBufferPadList->Length() % 2 != 0)
@@ -2079,7 +2093,7 @@
         padList.emplace_back(flatBufferPadList->Get(i), flatBufferPadList->Get(i+1));
     }
 
-    armnn::PadDescriptor descriptor(padList, padValue);
+    armnn::PadDescriptor descriptor(padList, padValue, ToPaddingMode(paddingMode));
 
     auto layerName = GetLayerName(graph, layerIndex);
     IConnectableLayer* layer = m_Network->AddPadLayer(descriptor, layerName.c_str());
diff --git a/src/armnnDeserializer/test/DeserializePad.cpp b/src/armnnDeserializer/test/DeserializePad.cpp
index 43de229..ade0974 100644
--- a/src/armnnDeserializer/test/DeserializePad.cpp
+++ b/src/armnnDeserializer/test/DeserializePad.cpp
@@ -12,10 +12,11 @@
 {
 struct PadFixture : public ParserFlatbuffersSerializeFixture
 {
-    explicit PadFixture(const std::string &inputShape,
-                        const std::string &padList,
-                        const std::string &outputShape,
-                        const std::string &dataType)
+    explicit PadFixture(const std::string& inputShape,
+                        const std::string& padList,
+                        const std::string& outputShape,
+                        const std::string& dataType,
+                        const std::string& paddingMode)
     {
         m_JsonString = R"(
             {
@@ -67,6 +68,7 @@
                             },
                             descriptor: {
                                 padList: )" + padList + R"(,
+                                paddingMode: )" + paddingMode + R"(,
                             }
                         }
                     },
@@ -106,23 +108,108 @@
     SimplePadFixture() : PadFixture("[ 2, 2, 2 ]",
                                     "[ 0, 1, 2, 1, 2, 2 ]",
                                     "[ 3, 5, 6 ]",
-                                    "QuantisedAsymm8") {}
+                                    "QuantisedAsymm8",
+                                    "Constant") {}
 };
 
 TEST_CASE_FIXTURE(SimplePadFixture, "SimplePadQuantisedAsymm8")
 {
     RunTest<3, armnn::DataType::QAsymmU8>(0,
-                                                 {
-                                                    0, 4, 2, 5, 6, 1, 5, 2
-                                                 },
-                                                 {
-                                                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                                                    4, 0, 0, 0, 0, 2, 5, 0, 0, 0, 0, 0, 0, 0, 0,
-                                                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6,
-                                                    1, 0, 0, 0, 0, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0,
-                                                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                                                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
-                                                 });
+                                         {
+                                            0, 4, 2, 5, 6, 1, 5, 2
+                                         },
+                                         {
+                                            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                                            4, 0, 0, 0, 0, 2, 5, 0, 0, 0, 0, 0, 0, 0, 0,
+                                            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6,
+                                            1, 0, 0, 0, 0, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0,
+                                            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                                            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+                                         });
+}
+
+struct SimplePadSymmetricFixture : PadFixture
+{
+    SimplePadSymmetricFixture() : PadFixture("[ 2, 2, 2 ]",
+                                             "[ 1, 1, 1, 1, 1, 1 ]",
+                                             "[ 4, 4, 4 ]",
+                                             "QuantisedAsymm8",
+                                             "Symmetric") {}
+};
+
+TEST_CASE_FIXTURE(SimplePadSymmetricFixture, "SimplePadSymmetricQuantisedAsymm8")
+{
+    RunTest<3, armnn::DataType::QAsymmU8>(0,
+                                          {
+                                              1, 2,
+                                              3, 4,
+
+                                              5, 6,
+                                              7, 8
+                                          },
+                                          {
+                                              1, 1, 2, 2,
+                                              1, 1, 2, 2,
+                                              3, 3, 4, 4,
+                                              3, 3, 4, 4,
+
+                                              1, 1, 2, 2,
+                                              1, 1, 2, 2,
+                                              3, 3, 4, 4,
+                                              3, 3, 4, 4,
+
+                                              5, 5, 6, 6,
+                                              5, 5, 6, 6,
+                                              7, 7, 8, 8,
+                                              7, 7, 8, 8,
+
+                                              5, 5, 6, 6,
+                                              5, 5, 6, 6,
+                                              7, 7, 8, 8,
+                                              7, 7, 8, 8
+                                          });
+}
+
+struct SimplePadReflectFixture : PadFixture
+{
+    SimplePadReflectFixture() : PadFixture("[ 2, 2, 2 ]",
+                                           "[ 1, 1, 1, 1, 1, 1 ]",
+                                           "[ 4, 4, 4 ]",
+                                           "QuantisedAsymm8",
+                                           "Reflect") {}
+};
+
+TEST_CASE_FIXTURE(SimplePadReflectFixture, "SimplePadReflectQuantisedAsymm8")
+{
+    RunTest<3, armnn::DataType::QAsymmU8>(0,
+                                          {
+                                              1, 2,
+                                              3, 4,
+
+                                              5, 6,
+                                              7, 8
+                                          },
+                                          {
+                                              8, 7, 8, 7,
+                                              6, 5, 6, 5,
+                                              8, 7, 8, 7,
+                                              6, 5, 6, 5,
+
+                                              4, 3, 4, 3,
+                                              2, 1, 2, 1,
+                                              4, 3, 4, 3,
+                                              2, 1, 2, 1,
+
+                                              8, 7, 8, 7,
+                                              6, 5, 6, 5,
+                                              8, 7, 8, 7,
+                                              6, 5, 6, 5,
+
+                                              4, 3, 4, 3,
+                                              2, 1, 2, 1,
+                                              4, 3, 4, 3,
+                                              2, 1, 2, 1
+                                          });
 }
 
 }
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index c577a11..40de349 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -619,9 +619,16 @@
     descriptor:PadDescriptor;
 }
 
+enum PaddingMode : byte {
+    Constant  = 0,
+    Reflect   = 1,
+    Symmetric = 2
+}
+
 table PadDescriptor {
     padList:[uint];
     padValue:float = 0;
+    paddingMode:PaddingMode = Constant;
 }
 
 /// @deprecated Use ElementwiseUnaryLayer instead
diff --git a/src/armnnSerializer/ArmnnSchema_generated.h b/src/armnnSerializer/ArmnnSchema_generated.h
index 712ad28..7747f9e 100644
--- a/src/armnnSerializer/ArmnnSchema_generated.h
+++ b/src/armnnSerializer/ArmnnSchema_generated.h
@@ -1198,6 +1198,39 @@
   return EnumNamesNormalizationAlgorithmMethod()[index];
 }
 
+enum PaddingMode {
+  PaddingMode_Constant = 0,
+  PaddingMode_Reflect = 1,
+  PaddingMode_Symmetric = 2,
+  PaddingMode_MIN = PaddingMode_Constant,
+  PaddingMode_MAX = PaddingMode_Symmetric
+};
+
+inline const PaddingMode (&EnumValuesPaddingMode())[3] {
+  static const PaddingMode values[] = {
+    PaddingMode_Constant,
+    PaddingMode_Reflect,
+    PaddingMode_Symmetric
+  };
+  return values;
+}
+
+inline const char * const *EnumNamesPaddingMode() {
+  static const char * const names[4] = {
+    "Constant",
+    "Reflect",
+    "Symmetric",
+    nullptr
+  };
+  return names;
+}
+
+inline const char *EnumNamePaddingMode(PaddingMode e) {
+  if (flatbuffers::IsOutRange(e, PaddingMode_Constant, PaddingMode_Symmetric)) return "";
+  const size_t index = static_cast<size_t>(e);
+  return EnumNamesPaddingMode()[index];
+}
+
 enum Layer {
   Layer_NONE = 0,
   Layer_ActivationLayer = 1,
@@ -6383,7 +6416,8 @@
   typedef PadDescriptorBuilder Builder;
   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
     VT_PADLIST = 4,
-    VT_PADVALUE = 6
+    VT_PADVALUE = 6,
+    VT_PADDINGMODE = 8
   };
   const flatbuffers::Vector<uint32_t> *padList() const {
     return GetPointer<const flatbuffers::Vector<uint32_t> *>(VT_PADLIST);
@@ -6391,11 +6425,15 @@
   float padValue() const {
     return GetField<float>(VT_PADVALUE, 0.0f);
   }
+  armnnSerializer::PaddingMode paddingMode() const {
+    return static_cast<armnnSerializer::PaddingMode>(GetField<int8_t>(VT_PADDINGMODE, 0));
+  }
   bool Verify(flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyOffset(verifier, VT_PADLIST) &&
            verifier.VerifyVector(padList()) &&
            VerifyField<float>(verifier, VT_PADVALUE) &&
+           VerifyField<int8_t>(verifier, VT_PADDINGMODE) &&
            verifier.EndTable();
   }
 };
@@ -6410,6 +6448,9 @@
   void add_padValue(float padValue) {
     fbb_.AddElement<float>(PadDescriptor::VT_PADVALUE, padValue, 0.0f);
   }
+  void add_paddingMode(armnnSerializer::PaddingMode paddingMode) {
+    fbb_.AddElement<int8_t>(PadDescriptor::VT_PADDINGMODE, static_cast<int8_t>(paddingMode), 0);
+  }
   explicit PadDescriptorBuilder(flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
@@ -6425,22 +6466,26 @@
 inline flatbuffers::Offset<PadDescriptor> CreatePadDescriptor(
     flatbuffers::FlatBufferBuilder &_fbb,
     flatbuffers::Offset<flatbuffers::Vector<uint32_t>> padList = 0,
-    float padValue = 0.0f) {
+    float padValue = 0.0f,
+    armnnSerializer::PaddingMode paddingMode = armnnSerializer::PaddingMode_Constant) {
   PadDescriptorBuilder builder_(_fbb);
   builder_.add_padValue(padValue);
   builder_.add_padList(padList);
+  builder_.add_paddingMode(paddingMode);
   return builder_.Finish();
 }
 
 inline flatbuffers::Offset<PadDescriptor> CreatePadDescriptorDirect(
     flatbuffers::FlatBufferBuilder &_fbb,
     const std::vector<uint32_t> *padList = nullptr,
-    float padValue = 0.0f) {
+    float padValue = 0.0f,
+    armnnSerializer::PaddingMode paddingMode = armnnSerializer::PaddingMode_Constant) {
   auto padList__ = padList ? _fbb.CreateVector<uint32_t>(*padList) : 0;
   return armnnSerializer::CreatePadDescriptor(
       _fbb,
       padList__,
-      padValue);
+      padValue,
+      paddingMode);
 }
 
 /// @deprecated Use ElementwiseUnaryLayer instead
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 84a9d53..c087843 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -894,7 +894,8 @@
 
     auto flatBufferPadDesc = serializer::CreatePadDescriptor(m_flatBufferBuilder,
                                                              m_flatBufferBuilder.CreateVector(padList),
-                                                             padDescriptor.m_PadValue);
+                                                             padDescriptor.m_PadValue,
+                                                             GetFlatBufferPaddingMode(padDescriptor.m_PaddingMode));
 
     auto flatBufferPadLayer = serializer::CreatePadLayer(m_flatBufferBuilder,
                                                          flatBufferBaseLayer,
diff --git a/src/armnnSerializer/SerializerUtils.cpp b/src/armnnSerializer/SerializerUtils.cpp
index 5ad2771..49ce721 100644
--- a/src/armnnSerializer/SerializerUtils.cpp
+++ b/src/armnnSerializer/SerializerUtils.cpp
@@ -170,6 +170,19 @@
     }
 }
 
+armnnSerializer::PaddingMode GetFlatBufferPaddingMode(armnn::PaddingMode paddingMode)
+{
+    switch (paddingMode)
+    {
+        case armnn::PaddingMode::Reflect:
+            return armnnSerializer::PaddingMode::PaddingMode_Reflect;
+        case armnn::PaddingMode::Symmetric:
+            return armnnSerializer::PaddingMode::PaddingMode_Symmetric;
+        default:
+            return armnnSerializer::PaddingMode::PaddingMode_Constant;
+    }
+}
+
 armnnSerializer::NormalizationAlgorithmChannel GetFlatBufferNormalizationAlgorithmChannel(
     armnn::NormalizationAlgorithmChannel normalizationAlgorithmChannel)
 {
diff --git a/src/armnnSerializer/SerializerUtils.hpp b/src/armnnSerializer/SerializerUtils.hpp
index 5517986..07cdc2a 100644
--- a/src/armnnSerializer/SerializerUtils.hpp
+++ b/src/armnnSerializer/SerializerUtils.hpp
@@ -27,6 +27,8 @@
 
 armnnSerializer::PaddingMethod GetFlatBufferPaddingMethod(armnn::PaddingMethod paddingMethod);
 
+armnnSerializer::PaddingMode GetFlatBufferPaddingMode(armnn::PaddingMode paddingMode);
+
 armnnSerializer::NormalizationAlgorithmChannel GetFlatBufferNormalizationAlgorithmChannel(
     armnn::NormalizationAlgorithmChannel normalizationAlgorithmChannel);
 
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 2bffe0b..e32b908 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -1684,6 +1684,36 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
+TEST_CASE("SerializePadReflect")
+{
+    const std::string layerName("padReflect");
+    const armnn::TensorInfo inputTensorInfo = armnn::TensorInfo({1, 2, 3, 4}, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 5, 7}, armnn::DataType::Float32);
+
+    armnn::PadDescriptor desc({{0, 0}, {1, 0}, {1, 1}, {1, 2}});
+    desc.m_PaddingMode = armnn::PaddingMode::Reflect;
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const padLayer = network->AddPadLayer(desc, layerName.c_str());
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
+
+    inputLayer->GetOutputSlot(0).Connect(padLayer->GetInputSlot(0));
+    padLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+    padLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    CHECK(deserializedNetwork);
+
+    LayerVerifierBaseWithDescriptor<armnn::PadDescriptor> verifier(layerName,
+                                                                   {inputTensorInfo},
+                                                                   {outputTensorInfo},
+                                                                   desc);
+    deserializedNetwork->ExecuteStrategy(verifier);
+}
+
 TEST_CASE("EnsurePadBackwardCompatibility")
 {
     // The PadDescriptor is being extended with a float PadValue (so a value other than 0
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
index 62f3263..8bbaea7 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
@@ -254,9 +254,9 @@
     return arm_compute::Size2D(width, height);
 }
 
-arm_compute::PixelValue GetPixelValue(arm_compute::ITensor& input, float pixelValue)
+arm_compute::PixelValue GetPixelValue(const arm_compute::ITensorInfo* tensorInfo, float pixelValue)
 {
-    switch (input.info()->data_type())
+    switch (tensorInfo->data_type())
     {
         case arm_compute::DataType::F16:
             return arm_compute::PixelValue(static_cast<Half>(pixelValue));
@@ -273,7 +273,7 @@
             return arm_compute::PixelValue(static_cast<int32_t>(pixelValue));
         default:
             throw InvalidArgumentException("Unsupported DataType: [" +
-                                           std::to_string(static_cast<int>(input.info()->data_type())) + "]");
+                                           std::to_string(static_cast<int>(tensorInfo->data_type())) + "]");
     }
 }
 
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.hpp b/src/backends/aclCommon/ArmComputeTensorUtils.hpp
index ad5d461..30df31b 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.hpp
@@ -65,8 +65,8 @@
 /// Utility function used to setup an arm_compute::Size2D object from width and height values.
 arm_compute::Size2D BuildArmComputeSize2D(const unsigned int width, const unsigned int height);
 
-/// Gets the appropriate PixelValue for the input DataType
-arm_compute::PixelValue GetPixelValue(arm_compute::ITensor& input, float pixelValue);
+/// Gets the appropriate PixelValue for the TensorInfo DataType
+arm_compute::PixelValue GetPixelValue(const arm_compute::ITensorInfo* tensorInfo, float pixelValue);
 
 /// Utility function used to setup an arm_compute::PadStrideInfo object from an armnn layer descriptor.
 template <typename Descriptor>
diff --git a/src/backends/aclCommon/ArmComputeUtils.hpp b/src/backends/aclCommon/ArmComputeUtils.hpp
index 2f76789..f096346 100644
--- a/src/backends/aclCommon/ArmComputeUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeUtils.hpp
@@ -300,6 +300,17 @@
     return arm_compute::Conv3dInfo{stride, padding, activationInfo, dilation, roundType, isFastMathEnabled};
 }
 
+inline arm_compute::PaddingMode ConvertPaddingModeToAcl(const PaddingMode& paddingMode)
+{
+    switch (paddingMode)
+    {
+        case PaddingMode::Constant:   return arm_compute::PaddingMode::CONSTANT;
+        case PaddingMode::Reflect:    return arm_compute::PaddingMode::REFLECT;
+        case PaddingMode::Symmetric:  return arm_compute::PaddingMode::SYMMETRIC;
+        default:                      throw InvalidArgumentException("Unsupported Padding Mode");
+    }
+}
+
 inline arm_compute::ReductionOperation ConvertReductionOperationToAcl(const ReduceDescriptor& descriptor)
 {
     switch (descriptor.m_ReduceOperation)
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index f90a7c8..a77ec06 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -77,6 +77,7 @@
     test/layerTests/LstmTestImpl.cpp \
     test/layerTests/MaximumTestImpl.cpp \
     test/layerTests/MinimumTestImpl.cpp \
+    test/layerTests/MirrorPadTestImpl.cpp \
     test/layerTests/MultiplicationTestImpl.cpp \
     test/layerTests/NegTestImpl.cpp \
     test/layerTests/NormalizationTestImpl.cpp \
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 9272ae7..cd62242 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -129,6 +129,8 @@
     layerTests/MeanTestImpl.hpp
     layerTests/MinimumTestImpl.cpp
     layerTests/MinimumTestImpl.hpp
+    layerTests/MirrorPadTestImpl.cpp
+    layerTests/MirrorPadTestImpl.hpp
     layerTests/MultiplicationTestImpl.cpp
     layerTests/MultiplicationTestImpl.hpp
     layerTests/NegTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 0dcd3d1..b51ff33 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -43,6 +43,7 @@
 #include <backendsCommon/test/layerTests/MaximumTestImpl.hpp>
 #include <backendsCommon/test/layerTests/MeanTestImpl.hpp>
 #include <backendsCommon/test/layerTests/MinimumTestImpl.hpp>
+#include <backendsCommon/test/layerTests/MirrorPadTestImpl.hpp>
 #include <backendsCommon/test/layerTests/MultiplicationTestImpl.hpp>
 #include <backendsCommon/test/layerTests/NegTestImpl.hpp>
 #include <backendsCommon/test/layerTests/NormalizationTestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp
new file mode 100644
index 0000000..61899db
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp
@@ -0,0 +1,1091 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "MirrorPadTestImpl.hpp"
+
+#include <QuantizeHelper.hpp>
+
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+//
+// Implementation templates
+//
+
+template<typename T>
+LayerTestResult<T, 2> MirrorPad2dTestCommon(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
+    const armnn::TensorInfo& inputTensorInfo,
+    const armnn::TensorInfo& outputTensorInfo,
+    const std::vector<T>& inputValues,
+    const std::vector<T>& expectedOutputValues,
+    const std::vector<std::pair<unsigned int, unsigned int>>& padList,
+    const armnn::PaddingMode paddingMode)
+{
+    IgnoreUnused(memoryManager);
+    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::PadQueueDescriptor descriptor;
+
+    descriptor.m_Parameters.m_PadList = padList;
+    descriptor.m_Parameters.m_PaddingMode = paddingMode;
+    armnn::WorkloadInfo info;
+
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), inputValues.data());
+
+    ExecuteWorkload(*workload, memoryManager);
+
+    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+    return LayerTestResult<T, 2>(actualOutput,
+                                 expectedOutputValues,
+                                 outputHandle->GetShape(),
+                                 outputTensorInfo.GetShape());
+}
+
+template<typename T>
+LayerTestResult<T, 3> MirrorPad3dTestCommon(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
+        const armnn::TensorInfo& inputTensorInfo,
+        const armnn::TensorInfo& outputTensorInfo,
+        const std::vector<T>& inputValues,
+        const std::vector<T>& expectedOutputValues,
+        const std::vector<std::pair<unsigned int, unsigned int>>& padList,
+        const armnn::PaddingMode paddingMode)
+{
+    IgnoreUnused(memoryManager);
+    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::PadQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_PadList = padList;
+    descriptor.m_Parameters.m_PaddingMode = paddingMode;
+
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), inputValues.data());
+
+    ExecuteWorkload(*workload, memoryManager);
+
+    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+    return LayerTestResult<T, 3>(actualOutput,
+                                 expectedOutputValues,
+                                 outputHandle->GetShape(),
+                                 outputTensorInfo.GetShape());
+}
+
+template<typename T>
+LayerTestResult<T, 4> MirrorPad4dTestCommon(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
+        const armnn::TensorInfo& inputTensorInfo,
+        const armnn::TensorInfo& outputTensorInfo,
+        const std::vector<T>& inputValues,
+        const std::vector<T>& expectedOutputValues,
+        const std::vector<std::pair<unsigned int, unsigned int>>& padList,
+        const armnn::PaddingMode paddingMode)
+{
+    IgnoreUnused(memoryManager);
+    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::PadQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_PadList = padList;
+    descriptor.m_Parameters.m_PaddingMode = paddingMode;
+
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), inputValues.data());
+
+    ExecuteWorkload(*workload, memoryManager);
+
+    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+    return LayerTestResult<T, 4>(actualOutput,
+                                 expectedOutputValues,
+                                 outputHandle->GetShape(),
+                                 outputTensorInfo.GetShape());
+}
+
+template<armnn::DataType ArmnnType,
+        typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> PadSymmetric2dTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
+        float qScale,
+        int32_t qOffset)
+{
+    const armnn::TensorShape inputShape{ 3, 3 };
+    const armnn::TensorShape outputShape{ 7, 7 };
+
+    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
+
+    std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
+    {
+        // Height (3) x Width (3)
+        1, 2, 3,
+        4, 5, 6,
+        7, 8, 9
+    },
+    qScale, qOffset);
+
+    std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
+    {
+        5, 4, 4, 5, 6, 6, 5,
+        2, 1, 1, 2, 3, 3, 2,
+        2, 1, 1, 2, 3, 3, 2,
+        5, 4, 4, 5, 6, 6, 5,
+        8, 7, 7, 8, 9, 9, 8,
+        8, 7, 7, 8, 9, 9, 8,
+        5, 4, 4, 5, 6, 6, 5
+    },
+    qScale, qOffset);
+
+    std::vector<std::pair<unsigned int, unsigned int>> padList;
+    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+
+    return MirrorPad2dTestCommon<T>(workloadFactory,
+                                    memoryManager,
+                                    tensorHandleFactory,
+                                    inputTensorInfo,
+                                    outputTensorInfo,
+                                    inputValues,
+                                    expectedOutputValues,
+                                    padList,
+                                    armnn::PaddingMode::Symmetric);
+}
+
+template<armnn::DataType ArmnnType,
+        typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> PadReflect2dTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
+        float qScale,
+        int32_t qOffset)
+{
+    const armnn::TensorShape inputShape{ 3, 3 };
+    const armnn::TensorShape outputShape{ 7, 7 };
+
+    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
+
+    std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
+    {
+        // Height (3) x Width (3)
+        1, 2, 3,
+        4, 5, 6,
+        7, 8, 9
+    },
+    qScale, qOffset);
+
+    std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
+    {
+        9, 8, 7, 8, 9, 8, 7,
+        6, 5, 4, 5, 6, 5, 4,
+        3, 2, 1, 2, 3, 2, 1,
+        6, 5, 4, 5, 6, 5, 4,
+        9, 8, 7, 8, 9, 8, 7,
+        6, 5, 4, 5, 6, 5, 4,
+        3, 2, 1, 2, 3, 2, 1
+    },
+    qScale, qOffset);
+
+    std::vector<std::pair<unsigned int, unsigned int>> padList;
+    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+
+    return MirrorPad2dTestCommon<T>(workloadFactory,
+                                    memoryManager,
+                                    tensorHandleFactory,
+                                    inputTensorInfo,
+                                    outputTensorInfo,
+                                    inputValues,
+                                    expectedOutputValues,
+                                    padList,
+                                    armnn::PaddingMode::Reflect);
+}
+
+template<armnn::DataType ArmnnType,
+        typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> PadSymmetric3dTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
+        float qScale,
+        int32_t qOffset)
+{
+    const armnn::TensorShape inputShape{ 2, 2, 2 };
+    const armnn::TensorShape outputShape{ 4, 4, 4 };
+
+    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
+
+    std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
+    {
+        // Channel 0, Height (2) x Width (2)
+        1, 2,
+        3, 4,
+
+        // Channel 1, Height (2) x Width (2)
+        5, 6,
+        7, 8
+    },
+    qScale, qOffset);
+
+    std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
+    {
+        1, 1, 2, 2,
+        1, 1, 2, 2,
+        3, 3, 4, 4,
+        3, 3, 4, 4,
+
+        1, 1, 2, 2,
+        1, 1, 2, 2,
+        3, 3, 4, 4,
+        3, 3, 4, 4,
+
+        5, 5, 6, 6,
+        5, 5, 6, 6,
+        7, 7, 8, 8,
+        7, 7, 8, 8,
+
+        5, 5, 6, 6,
+        5, 5, 6, 6,
+        7, 7, 8, 8,
+        7, 7, 8, 8
+    },
+    qScale, qOffset);
+
+    std::vector<std::pair<unsigned int, unsigned int>> padList;
+    padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+    padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+    padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+
+    return MirrorPad3dTestCommon<T>(workloadFactory,
+                                    memoryManager,
+                                    tensorHandleFactory,
+                                    inputTensorInfo,
+                                    outputTensorInfo,
+                                    inputValues,
+                                    expectedOutputValues,
+                                    padList,
+                                    armnn::PaddingMode::Symmetric);
+}
+
+template<armnn::DataType ArmnnType,
+        typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> PadReflect3dTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
+        float qScale,
+        int32_t qOffset)
+{
+    const armnn::TensorShape inputShape{ 2, 2, 2 };
+    const armnn::TensorShape outputShape{ 4, 4, 4 };
+
+    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
+
+    std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
+    {
+        // Channel 0, Height (2) x Width (2)
+        1, 2,
+        3, 4,
+
+        // Channel 1, Height (2) x Width (2)
+        5, 6,
+        7, 8
+    },
+    qScale, qOffset);
+
+    std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
+    {
+        8, 7, 8, 7,
+        6, 5, 6, 5,
+        8, 7, 8, 7,
+        6, 5, 6, 5,
+
+        4, 3, 4, 3,
+        2, 1, 2, 1,
+        4, 3, 4, 3,
+        2, 1, 2, 1,
+
+        8, 7, 8, 7,
+        6, 5, 6, 5,
+        8, 7, 8, 7,
+        6, 5, 6, 5,
+
+        4, 3, 4, 3,
+        2, 1, 2, 1,
+        4, 3, 4, 3,
+        2, 1, 2, 1
+    },
+    qScale, qOffset);
+
+    std::vector<std::pair<unsigned int, unsigned int>> padList;
+    padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+    padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+    padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+
+    return MirrorPad3dTestCommon<T>(workloadFactory,
+                                    memoryManager,
+                                    tensorHandleFactory,
+                                    inputTensorInfo,
+                                    outputTensorInfo,
+                                    inputValues,
+                                    expectedOutputValues,
+                                    padList,
+                                    armnn::PaddingMode::Reflect);
+}
+
+template<armnn::DataType ArmnnType,
+        typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> PadSymmetric4dTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
+        float qScale,
+        int32_t qOffset)
+{
+    const armnn::TensorShape inputShape{ 2, 2, 2, 2 };
+    const armnn::TensorShape outputShape{ 6, 6, 6, 6 };
+
+    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
+
+    std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
+    {
+        // Batch 0, Channel 0, Height (2) x Width (2)
+        1, 2,
+        3, 4,
+
+        // Batch 0, Channel 1, Height (2) x Width (2)
+        5, 6,
+        7, 8,
+
+        // Batch 1, Channel 0, Height (2) x Width (2)
+        9, 10,
+        11, 12,
+
+        // Batch 1, Channel 1, Height (2) x Width (2)
+        13, 14,
+        15, 16,
+    },
+    qScale, qOffset);
+
+    std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
+    {
+        16, 15, 15, 16, 16, 15,
+        14, 13, 13, 14, 14, 13,
+        14, 13, 13, 14, 14, 13,
+        16, 15, 15, 16, 16, 15,
+        16, 15, 15, 16, 16, 15,
+        14, 13, 13, 14, 14, 13,
+
+        12, 11, 11, 12, 12, 11,
+        10,  9,  9, 10, 10,  9,
+        10,  9,  9, 10, 10,  9,
+        12, 11, 11, 12, 12, 11,
+        12, 11, 11, 12, 12, 11,
+        10,  9,  9, 10, 10,  9,
+
+        12, 11, 11, 12, 12, 11,
+        10,  9,  9, 10, 10,  9,
+        10,  9,  9, 10, 10,  9,
+        12, 11, 11, 12, 12, 11,
+        12, 11, 11, 12, 12, 11,
+        10,  9,  9, 10, 10,  9,
+
+        16, 15, 15, 16, 16, 15,
+        14, 13, 13, 14, 14, 13,
+        14, 13, 13, 14, 14, 13,
+        16, 15, 15, 16, 16, 15,
+        16, 15, 15, 16, 16, 15,
+        14, 13, 13, 14, 14, 13,
+
+        16, 15, 15, 16, 16, 15,
+        14, 13, 13, 14, 14, 13,
+        14, 13, 13, 14, 14, 13,
+        16, 15, 15, 16, 16, 15,
+        16, 15, 15, 16, 16, 15,
+        14, 13, 13, 14, 14, 13,
+
+        12, 11, 11, 12, 12, 11,
+        10,  9,  9, 10, 10,  9,
+        10,  9,  9, 10, 10,  9,
+        12, 11, 11, 12, 12, 11,
+        12, 11, 11, 12, 12, 11,
+        10,  9,  9, 10, 10,  9,
+
+
+        8,  7,  7,  8,  8,  7,
+        6,  5,  5,  6,  6,  5,
+        6,  5,  5,  6,  6,  5,
+        8,  7,  7,  8,  8,  7,
+        8,  7,  7,  8,  8,  7,
+        6,  5,  5,  6,  6,  5,
+
+        4,  3,  3,  4,  4,  3,
+        2,  1,  1,  2,  2,  1,
+        2,  1,  1,  2,  2,  1,
+        4,  3,  3,  4,  4,  3,
+        4,  3,  3,  4,  4,  3,
+        2,  1,  1,  2,  2,  1,
+
+        4,  3,  3,  4,  4,  3,
+        2,  1,  1,  2,  2,  1,
+        2,  1,  1,  2,  2,  1,
+        4,  3,  3,  4,  4,  3,
+        4,  3,  3,  4,  4,  3,
+        2,  1,  1,  2,  2,  1,
+
+        8,  7,  7,  8,  8,  7,
+        6,  5,  5,  6,  6,  5,
+        6,  5,  5,  6,  6,  5,
+        8,  7,  7,  8,  8,  7,
+        8,  7,  7,  8,  8,  7,
+        6,  5,  5,  6,  6,  5,
+
+        8,  7,  7,  8,  8,  7,
+        6,  5,  5,  6,  6,  5,
+        6,  5,  5,  6,  6,  5,
+        8,  7,  7,  8,  8,  7,
+        8,  7,  7,  8,  8,  7,
+        6,  5,  5,  6,  6,  5,
+
+        4,  3,  3,  4,  4,  3,
+        2,  1,  1,  2,  2,  1,
+        2,  1,  1,  2,  2,  1,
+        4,  3,  3,  4,  4,  3,
+        4,  3,  3,  4,  4,  3,
+        2,  1,  1,  2,  2,  1,
+
+
+        8,  7,  7,  8,  8,  7,
+        6,  5,  5,  6,  6,  5,
+        6,  5,  5,  6,  6,  5,
+        8,  7,  7,  8,  8,  7,
+        8,  7,  7,  8,  8,  7,
+        6,  5,  5,  6,  6,  5,
+
+        4,  3,  3,  4,  4,  3,
+        2,  1,  1,  2,  2,  1,
+        2,  1,  1,  2,  2,  1,
+        4,  3,  3,  4,  4,  3,
+        4,  3,  3,  4,  4,  3,
+        2,  1,  1,  2,  2,  1,
+
+        4,  3,  3,  4,  4,  3,
+        2,  1,  1,  2,  2,  1,
+        2,  1,  1,  2,  2,  1,
+        4,  3,  3,  4,  4,  3,
+        4,  3,  3,  4,  4,  3,
+        2,  1,  1,  2,  2,  1,
+
+        8,  7,  7,  8,  8,  7,
+        6,  5,  5,  6,  6,  5,
+        6,  5,  5,  6,  6,  5,
+        8,  7,  7,  8,  8,  7,
+        8,  7,  7,  8,  8,  7,
+        6,  5,  5,  6,  6,  5,
+
+        8,  7,  7,  8,  8,  7,
+        6,  5,  5,  6,  6,  5,
+        6,  5,  5,  6,  6,  5,
+        8,  7,  7,  8,  8,  7,
+        8,  7,  7,  8,  8,  7,
+        6,  5,  5,  6,  6,  5,
+
+        4,  3,  3,  4,  4,  3,
+        2,  1,  1,  2,  2,  1,
+        2,  1,  1,  2,  2,  1,
+        4,  3,  3,  4,  4,  3,
+        4,  3,  3,  4,  4,  3,
+        2,  1,  1,  2,  2,  1,
+
+
+        16, 15, 15, 16, 16, 15,
+        14, 13, 13, 14, 14, 13,
+        14, 13, 13, 14, 14, 13,
+        16, 15, 15, 16, 16, 15,
+        16, 15, 15, 16, 16, 15,
+        14, 13, 13, 14, 14, 13,
+
+        12, 11, 11, 12, 12, 11,
+        10,  9,  9, 10, 10,  9,
+        10,  9,  9, 10, 10,  9,
+        12, 11, 11, 12, 12, 11,
+        12, 11, 11, 12, 12, 11,
+        10,  9,  9, 10, 10,  9,
+
+        12, 11, 11, 12, 12, 11,
+        10,  9,  9, 10, 10,  9,
+        10,  9,  9, 10, 10,  9,
+        12, 11, 11, 12, 12, 11,
+        12, 11, 11, 12, 12, 11,
+        10,  9,  9, 10, 10,  9,
+
+        16, 15, 15, 16, 16, 15,
+        14, 13, 13, 14, 14, 13,
+        14, 13, 13, 14, 14, 13,
+        16, 15, 15, 16, 16, 15,
+        16, 15, 15, 16, 16, 15,
+        14, 13, 13, 14, 14, 13,
+
+        16, 15, 15, 16, 16, 15,
+        14, 13, 13, 14, 14, 13,
+        14, 13, 13, 14, 14, 13,
+        16, 15, 15, 16, 16, 15,
+        16, 15, 15, 16, 16, 15,
+        14, 13, 13, 14, 14, 13,
+
+        12, 11, 11, 12, 12, 11,
+        10,  9,  9, 10, 10,  9,
+        10,  9,  9, 10, 10,  9,
+        12, 11, 11, 12, 12, 11,
+        12, 11, 11, 12, 12, 11,
+        10,  9,  9, 10, 10,  9,
+
+
+        16, 15, 15, 16, 16, 15,
+        14, 13, 13, 14, 14, 13,
+        14, 13, 13, 14, 14, 13,
+        16, 15, 15, 16, 16, 15,
+        16, 15, 15, 16, 16, 15,
+        14, 13, 13, 14, 14, 13,
+
+        12, 11, 11, 12, 12, 11,
+        10,  9,  9, 10, 10,  9,
+        10,  9,  9, 10, 10,  9,
+        12, 11, 11, 12, 12, 11,
+        12, 11, 11, 12, 12, 11,
+        10,  9,  9, 10, 10,  9,
+
+        12, 11, 11, 12, 12, 11,
+        10,  9,  9, 10, 10,  9,
+        10,  9,  9, 10, 10,  9,
+        12, 11, 11, 12, 12, 11,
+        12, 11, 11, 12, 12, 11,
+        10,  9,  9, 10, 10,  9,
+
+        16, 15, 15, 16, 16, 15,
+        14, 13, 13, 14, 14, 13,
+        14, 13, 13, 14, 14, 13,
+        16, 15, 15, 16, 16, 15,
+        16, 15, 15, 16, 16, 15,
+        14, 13, 13, 14, 14, 13,
+
+        16, 15, 15, 16, 16, 15,
+        14, 13, 13, 14, 14, 13,
+        14, 13, 13, 14, 14, 13,
+        16, 15, 15, 16, 16, 15,
+        16, 15, 15, 16, 16, 15,
+        14, 13, 13, 14, 14, 13,
+
+        12, 11, 11, 12, 12, 11,
+        10,  9,  9, 10, 10,  9,
+        10,  9,  9, 10, 10,  9,
+        12, 11, 11, 12, 12, 11,
+        12, 11, 11, 12, 12, 11,
+        10,  9,  9, 10, 10,  9,
+
+
+        8,  7,  7,  8,  8,  7,
+        6,  5,  5,  6,  6,  5,
+        6,  5,  5,  6,  6,  5,
+        8,  7,  7,  8,  8,  7,
+        8,  7,  7,  8,  8,  7,
+        6,  5,  5,  6,  6,  5,
+
+        4,  3,  3,  4,  4,  3,
+        2,  1,  1,  2,  2,  1,
+        2,  1,  1,  2,  2,  1,
+        4,  3,  3,  4,  4,  3,
+        4,  3,  3,  4,  4,  3,
+        2,  1,  1,  2,  2,  1,
+
+        4,  3,  3,  4,  4,  3,
+        2,  1,  1,  2,  2,  1,
+        2,  1,  1,  2,  2,  1,
+        4,  3,  3,  4,  4,  3,
+        4,  3,  3,  4,  4,  3,
+        2,  1,  1,  2,  2,  1,
+
+        8,  7,  7,  8,  8,  7,
+        6,  5,  5,  6,  6,  5,
+        6,  5,  5,  6,  6,  5,
+        8,  7,  7,  8,  8,  7,
+        8,  7,  7,  8,  8,  7,
+        6,  5,  5,  6,  6,  5,
+
+        8,  7,  7,  8,  8,  7,
+        6,  5,  5,  6,  6,  5,
+        6,  5,  5,  6,  6,  5,
+        8,  7,  7,  8,  8,  7,
+        8,  7,  7,  8,  8,  7,
+        6,  5,  5,  6,  6,  5,
+
+        4,  3,  3,  4,  4,  3,
+        2,  1,  1,  2,  2,  1,
+        2,  1,  1,  2,  2,  1,
+        4,  3,  3,  4,  4,  3,
+        4,  3,  3,  4,  4,  3,
+        2,  1,  1,  2,  2,  1
+    },
+    qScale, qOffset);
+
+    std::vector<std::pair<unsigned int, unsigned int>> padList;
+    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+
+    return MirrorPad4dTestCommon<T>(workloadFactory,
+                                    memoryManager,
+                                    tensorHandleFactory,
+                                    inputTensorInfo,
+                                    outputTensorInfo,
+                                    inputValues,
+                                    expectedOutputValues,
+                                    padList,
+                                    armnn::PaddingMode::Symmetric);
+}
+
+template<armnn::DataType ArmnnType,
+        typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> PadReflect4dTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
+        float qScale,
+        int32_t qOffset)
+{
+    const armnn::TensorShape inputShape{ 2, 2, 2, 2 };
+    const armnn::TensorShape outputShape{ 4, 4, 4, 4 };
+
+    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
+
+    std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
+    {
+        // Batch 0, Channel 0, Height (2) x Width (2)
+        1, 2,
+        3, 4,
+
+        // Batch 0, Channel 1, Height (2) x Width (2)
+        5, 6,
+        7, 8,
+
+        // Batch 1, Channel 0, Height (2) x Width (2)
+        9, 10,
+        11, 12,
+
+        // Batch 1, Channel 1, Height (2) x Width (2)
+        13, 14,
+        15, 16,
+    },
+    qScale, qOffset);
+
+    std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
+    {
+        16, 15, 16, 15,
+        14, 13, 14, 13,
+        16, 15, 16, 15,
+        14, 13, 14, 13,
+
+        12, 11, 12, 11,
+        10,  9, 10,  9,
+        12, 11, 12, 11,
+        10,  9, 10,  9,
+
+        16, 15, 16, 15,
+        14, 13, 14, 13,
+        16, 15, 16, 15,
+        14, 13, 14, 13,
+
+        12, 11, 12, 11,
+        10,  9, 10,  9,
+        12, 11, 12, 11,
+        10,  9, 10,  9,
+
+
+        8,  7,  8,  7,
+        6,  5,  6,  5,
+        8,  7,  8,  7,
+        6,  5,  6,  5,
+
+        4,  3,  4,  3,
+        2,  1,  2,  1,
+        4,  3,  4,  3,
+        2,  1,  2,  1,
+
+        8,  7,  8,  7,
+        6,  5,  6,  5,
+        8,  7,  8,  7,
+        6,  5,  6,  5,
+
+        4,  3,  4,  3,
+        2,  1,  2,  1,
+        4,  3,  4,  3,
+        2,  1,  2,  1,
+
+
+        16, 15, 16, 15,
+        14, 13, 14, 13,
+        16, 15, 16, 15,
+        14, 13, 14, 13,
+
+        12, 11, 12, 11,
+        10,  9, 10,  9,
+        12, 11, 12, 11,
+        10,  9, 10,  9,
+
+        16, 15, 16, 15,
+        14, 13, 14, 13,
+        16, 15, 16, 15,
+        14, 13, 14, 13,
+
+        12, 11, 12, 11,
+        10,  9, 10,  9,
+        12, 11, 12, 11,
+        10,  9, 10,  9,
+
+
+        8,  7,  8,  7,
+        6,  5,  6,  5,
+        8,  7,  8,  7,
+        6,  5,  6,  5,
+
+        4,  3,  4,  3,
+        2,  1,  2,  1,
+        4,  3,  4,  3,
+        2,  1,  2,  1,
+
+        8,  7,  8,  7,
+        6,  5,  6,  5,
+        8,  7,  8,  7,
+        6,  5,  6,  5,
+
+        4,  3,  4,  3,
+        2,  1,  2,  1,
+        4,  3,  4,  3,
+        2,  1,  2,  1
+    },
+    qScale, qOffset);
+
+    std::vector<std::pair<unsigned int, unsigned int>> padList;
+    padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+    padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+    padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+    padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+
+    return MirrorPad4dTestCommon<T>(workloadFactory,
+                                    memoryManager,
+                                    tensorHandleFactory,
+                                    inputTensorInfo,
+                                    outputTensorInfo,
+                                    inputValues,
+                                    expectedOutputValues,
+                                    padList,
+                                    armnn::PaddingMode::Reflect);
+}
+
+LayerTestResult<armnn::Half, 2> PadSymmetricFloat16(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    using namespace half_float::literal;
+
+    const armnn::TensorShape inputShape{ 3, 3 };
+    const armnn::TensorShape outputShape{ 5, 7 };
+
+    const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float16);
+    const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float16);
+
+    const std::vector<armnn::Half> inputValues =
+    {
+        1._h,  2._h,  3._h,
+        4._h,  5._h,  6._h,
+        7._h,  8._h,  9._h
+    };
+
+    std::vector<armnn::Half> expectedOutputValues =
+    {
+        2._h, 1._h, 1._h, 2._h, 3._h, 3._h, 2._h,
+        2._h, 1._h, 1._h, 2._h, 3._h, 3._h, 2._h,
+        5._h, 4._h, 4._h, 5._h, 6._h, 6._h, 5._h,
+        8._h, 7._h, 7._h, 8._h, 9._h, 9._h, 8._h,
+        8._h, 7._h, 7._h, 8._h, 9._h, 9._h, 8._h,
+    };
+
+    std::vector<std::pair<unsigned int, unsigned int>> padList;
+    padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+
+    return MirrorPad2dTestCommon<armnn::Half>(workloadFactory,
+                                              memoryManager,
+                                              tensorHandleFactory,
+                                              inputTensorInfo,
+                                              outputTensorInfo,
+                                              inputValues,
+                                              expectedOutputValues,
+                                              padList,
+                                              armnn::PaddingMode::Symmetric);
+}
+
+LayerTestResult<armnn::Half, 2> PadReflectFloat16(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    using namespace half_float::literal;
+
+    const armnn::TensorShape inputShape{ 3, 3 };
+    const armnn::TensorShape outputShape{ 7, 5 };
+
+    const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float16);
+    const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float16);
+
+    const std::vector<armnn::Half> inputValues =
+    {
+        1._h,  2._h,  3._h,
+        4._h,  5._h,  6._h,
+        7._h,  8._h,  9._h
+    };
+
+    std::vector<armnn::Half> expectedOutputValues =
+    {
+        8._h, 7._h, 8._h, 9._h, 8._h,
+        5._h, 4._h, 5._h, 6._h, 5._h,
+        2._h, 1._h, 2._h, 3._h, 2._h,
+        5._h, 4._h, 5._h, 6._h, 5._h,
+        8._h, 7._h, 8._h, 9._h, 8._h,
+        5._h, 4._h, 5._h, 6._h, 5._h,
+        2._h, 1._h, 2._h, 3._h, 2._h,
+    };
+
+    std::vector<std::pair<unsigned int, unsigned int>> padList;
+    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+    padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+
+    return MirrorPad2dTestCommon<armnn::Half>(workloadFactory,
+                                              memoryManager,
+                                              tensorHandleFactory,
+                                              inputTensorInfo,
+                                              outputTensorInfo,
+                                              inputValues,
+                                              expectedOutputValues,
+                                              padList,
+                                              armnn::PaddingMode::Reflect);
+}
+
+//
+// Implementation functions
+//
+
+LayerTestResult<float, 2> PadSymmetric2dFloat32Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadSymmetric2dTest<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
+}
+
+LayerTestResult<float, 2> PadReflect2dFloat32Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadReflect2dTest<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
+}
+
+LayerTestResult<float, 3> PadSymmetric3dFloat32Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadSymmetric3dTest<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
+}
+
+LayerTestResult<float, 3> PadReflect3dFloat32Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadReflect3dTest<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
+}
+
+LayerTestResult<uint8_t, 3> PadSymmetric3dUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadSymmetric3dTest<armnn::DataType::QAsymmU8>(
+            workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 128);
+}
+
+LayerTestResult<uint8_t, 3> PadReflect3dUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadReflect3dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 128);
+}
+
+LayerTestResult<int8_t, 3> PadSymmetric3dInt8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadSymmetric3dTest<armnn::DataType::QAsymmS8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
+}
+
+LayerTestResult<int8_t, 3> PadReflect3dInt8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadReflect3dTest<armnn::DataType::QAsymmS8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
+}
+
+LayerTestResult<float, 4> PadSymmetric4dFloat32Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadSymmetric4dTest<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
+}
+
+LayerTestResult<float, 4> PadReflect4dFloat32Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadReflect4dTest<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
+}
+
+LayerTestResult<armnn::BFloat16, 4> PadSymmetric4dBFloat16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadSymmetric4dTest<armnn::DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
+}
+
+LayerTestResult<armnn::BFloat16, 4> PadReflect4dBFloat16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadReflect4dTest<armnn::DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
+}
+
+LayerTestResult<uint8_t, 4> PadSymmetric4dUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadSymmetric4dTest<armnn::DataType::QAsymmU8>(
+            workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 128);
+}
+
+LayerTestResult<uint8_t, 4> PadReflect4dUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadReflect4dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 128);
+}
+
+LayerTestResult<int8_t, 4> PadSymmetric4dInt8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadSymmetric4dTest<armnn::DataType::QAsymmS8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
+}
+
+LayerTestResult<int8_t, 4> PadReflect4dInt8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadReflect4dTest<armnn::DataType::QAsymmS8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
+}
+
+LayerTestResult<int16_t, 4> PadSymmetric4dInt16Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadSymmetric4dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 2.0f, 0);
+}
+
+LayerTestResult<int16_t, 4> PadReflect4dInt16Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadReflect4dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 2.0f, 0);
+}
+
+LayerTestResult<armnn::Half, 2> PadSymmetricFloat16Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadSymmetricFloat16(workloadFactory, memoryManager, tensorHandleFactory);
+}
+
+LayerTestResult<armnn::Half, 2> PadReflectFloat16Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadReflectFloat16(workloadFactory, memoryManager, tensorHandleFactory);
+}
diff --git a/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.hpp
new file mode 100644
index 0000000..52898b8
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.hpp
@@ -0,0 +1,117 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <Half.hpp>
+
+#include <ResolveType.hpp>
+
+#include <armnn/Types.hpp>
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+LayerTestResult<float, 2> PadSymmetric2dFloat32Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 2> PadReflect2dFloat32Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 3> PadSymmetric3dFloat32Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 3> PadReflect3dFloat32Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 3> PadSymmetric3dUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 3> PadReflect3dUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int8_t, 3> PadSymmetric3dInt8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int8_t, 3> PadReflect3dInt8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> PadSymmetric4dFloat32Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> PadReflect4dFloat32Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<armnn::BFloat16, 4> PadSymmetric4dBFloat16Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<armnn::BFloat16, 4> PadReflect4dBFloat16Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 4> PadSymmetric4dUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 4> PadReflect4dUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int8_t, 4> PadSymmetric4dInt8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int8_t, 4> PadReflect4dInt8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int16_t, 4> PadSymmetric4dInt16Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int16_t, 4> PadReflect4dInt16Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<armnn::Half, 2> PadSymmetricFloat16Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<armnn::Half, 2> PadReflectFloat16Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory);
\ No newline at end of file
diff --git a/src/backends/cl/workloads/ClFillWorkload.cpp b/src/backends/cl/workloads/ClFillWorkload.cpp
index 8cb2db4..ea42dcf 100644
--- a/src/backends/cl/workloads/ClFillWorkload.cpp
+++ b/src/backends/cl/workloads/ClFillWorkload.cpp
@@ -29,7 +29,7 @@
     m_Data.ValidateInputsOutputs("ClFillWorkload", 1, 1);
 
     arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(this->m_Data.m_Outputs[0])->GetTensor();
-    arm_compute::PixelValue pixelValue = GetPixelValue(output, descriptor.m_Parameters.m_Value);
+    arm_compute::PixelValue pixelValue = GetPixelValue(output.info(), descriptor.m_Parameters.m_Value);
 
     m_Layer.configure(clCompileContext, &output, pixelValue);
 }
diff --git a/src/backends/cl/workloads/ClPadWorkload.cpp b/src/backends/cl/workloads/ClPadWorkload.cpp
index 10c8907..4697510 100644
--- a/src/backends/cl/workloads/ClPadWorkload.cpp
+++ b/src/backends/cl/workloads/ClPadWorkload.cpp
@@ -39,7 +39,7 @@
 
     arm_compute::PaddingList padList = static_cast<arm_compute::PaddingList>(reversed_PadList);
 
-    arm_compute::PixelValue pixelValue = GetPixelValue(input, descriptor.m_Parameters.m_PadValue);
+    arm_compute::PixelValue pixelValue = GetPixelValue(input.info(), descriptor.m_Parameters.m_PadValue);
 
     m_Layer.configure(clCompileContext, &input, &output, padList, pixelValue);
 }
diff --git a/src/backends/neon/workloads/NeonFillWorkload.cpp b/src/backends/neon/workloads/NeonFillWorkload.cpp
index 0a3c7f0..3cfa56a 100644
--- a/src/backends/neon/workloads/NeonFillWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFillWorkload.cpp
@@ -28,7 +28,7 @@
     m_Data.ValidateInputsOutputs("NeonFillWorkload", 1, 1);
 
     arm_compute::ITensor& output = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-    arm_compute::PixelValue pixelValue = GetPixelValue(output, descriptor.m_Parameters.m_Value);
+    arm_compute::PixelValue pixelValue = GetPixelValue(output.info(), descriptor.m_Parameters.m_Value);
 
     auto layer = std::make_unique<arm_compute::NEFill>();
     layer->configure(&output, pixelValue);
diff --git a/src/backends/neon/workloads/NeonPadWorkload.cpp b/src/backends/neon/workloads/NeonPadWorkload.cpp
index b378d5f..42fc42b 100644
--- a/src/backends/neon/workloads/NeonPadWorkload.cpp
+++ b/src/backends/neon/workloads/NeonPadWorkload.cpp
@@ -38,7 +38,7 @@
 
     arm_compute::PaddingList padList = static_cast<arm_compute::PaddingList>(reversed_PadList);
 
-    arm_compute::PixelValue pixelValue = GetPixelValue(input, descriptor.m_Parameters.m_PadValue);
+    arm_compute::PixelValue pixelValue = GetPixelValue(input.info(), descriptor.m_Parameters.m_PadValue);
 
     auto layer = std::make_unique<arm_compute::NEPadLayer>();
     layer->configure(&input, &output, padList, pixelValue);
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index f8169a6..7049279 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -41,6 +41,7 @@
         workloads/Lstm.cpp \
         workloads/LstmUtils.cpp \
         workloads/Concatenate.cpp \
+        workloads/MirrorPad.cpp \
         workloads/Pad.cpp \
         workloads/Pooling2d.cpp \
         workloads/PreluImpl.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index cb31b37..5993270 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1415,7 +1415,7 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(LogSoftmaxFloat16_3, LogSoftmaxTest3<DataType::Float16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(LogSoftmaxFloat16_4, LogSoftmaxTest4<DataType::Float16>)
 
-// Pad
+// Pad - Constant
 ARMNN_AUTO_TEST_CASE_WITH_THF(PadBFloat162d, PadBFloat162dTest)
 ARMNN_AUTO_TEST_CASE_WITH_THF(PadBFloat162dCustomPadding, PadBFloat162dCustomPaddingTest)
 ARMNN_AUTO_TEST_CASE_WITH_THF(PadBFloat163d, PadBFloat163dTest)
@@ -1445,6 +1445,31 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(PadQAsymmS8, PadQAsymmTestCommon<DataType::QAsymmS8>, -2.0f, 3, 0.0f)
 ARMNN_AUTO_TEST_CASE_WITH_THF(PadQAsymmS8CustomPadding, PadQAsymmTestCommon<DataType::QAsymmS8>, -2.0f, 3, 2.0f)
 
+// Pad - Symmetric & Reflect
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric2dFloat32, PadSymmetric2dFloat32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect2dFloat32, PadReflect2dFloat32Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric3dFloat32, PadSymmetric3dFloat32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect3dFloat32, PadReflect3dFloat32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric3dUint8, PadSymmetric3dUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect3dUint8, PadReflect3dUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric3dInt8, PadSymmetric3dInt8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect3dInt8, PadReflect3dInt8Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dFloat32, PadSymmetric4dFloat32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dFloat32, PadReflect4dFloat32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dBFloat16, PadSymmetric4dBFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dBFloat16, PadReflect4dBFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dUint8, PadSymmetric4dUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dUint8, PadReflect4dUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dInt8, PadSymmetric4dInt8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dInt8, PadReflect4dInt8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dInt16, PadSymmetric4dInt16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dInt16, PadReflect4dInt16Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetricFloat16, PadSymmetricFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflectFloat16, PadReflectFloat16Test)
+
 // Constant
 ARMNN_AUTO_TEST_CASE_WITH_THF(Constant, ConstantTest)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ConstantUint8, ConstantUint8CustomQuantizationScaleAndOffsetTest)
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 5727291..f212522 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -52,6 +52,8 @@
     Concatenate.hpp
     Concatenate.cpp
     Minimum.hpp
+    MirrorPad.cpp
+    MirrorPad.hpp
     Pad.cpp
     Pad.hpp
     Pooling2d.cpp
diff --git a/src/backends/reference/workloads/MirrorPad.cpp b/src/backends/reference/workloads/MirrorPad.cpp
new file mode 100644
index 0000000..7388fed
--- /dev/null
+++ b/src/backends/reference/workloads/MirrorPad.cpp
@@ -0,0 +1,199 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "MirrorPad.hpp"
+
+#include "BaseIterator.hpp"
+#include "Decoders.hpp"
+#include "Encoders.hpp"
+
+namespace
+{
+
+// Convert a linear index into n-dimensional coordinates.
+// E.g. index = 2 returns [0, 0, 2].
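+// For example, for shape [2, 2, 2] index 5 maps to coordinate [1, 0, 1] (row-major order).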
+inline std::vector<unsigned int> IndexToCoord(const armnn::TensorShape& shape, unsigned int index)
+{
+    unsigned int numOfElements = shape.GetNumElements();
+
+    ARMNN_ASSERT_MSG(index < numOfElements, "Index has to be in [0, num_elements)");
+    ARMNN_ASSERT_MSG(numOfElements != 0, "Cannot create coordinate from empty shape");
+
+    std::vector<unsigned int> coord(shape.GetNumDimensions());
+    for(unsigned int i = 0; i < shape.GetNumDimensions(); ++i)
+    {
+        numOfElements /= shape[i];
+        coord[i] = index / numOfElements;
+        index %= numOfElements;
+    }
+
+    return coord;
+}
+
+// Returns the index of a given coordinate.
+// E.g. [0, 0, 2] returns 2.
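+// For example, for shape [2, 2, 2] coordinate [1, 0, 1] maps back to index 5 (row-major order).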
+inline unsigned int CoordToIndex(const armnn::TensorShape& shape, const std::vector<unsigned int>& coord)
+{
+    ARMNN_ASSERT_MSG(shape.GetNumDimensions() != 0, "Cannot get index from empty shape");
+    ARMNN_ASSERT_MSG(coord.size() != 0, "Cannot get index of empty coordinate");
+
+    unsigned int index    = 0;
+    unsigned int dimSize  = 1;
+
+    for (unsigned int i = shape.GetNumDimensions(); i > 0; --i)
+    {
+        index += coord[i - 1] * dimSize;
+        dimSize *= shape[i - 1];
+    }
+
+    return index;
+}
+
+} // anonymous namespace
+
+namespace armnn
+{
+
+void MirrorPad(const TensorInfo& inputInfo,
+               const TensorInfo& outputInfo,
+               const ITensorHandle* inputHandle,
+               ITensorHandle* outputHandle,
+               const PadQueueDescriptor& data)
+{
+    auto padList  = data.m_Parameters.m_PadList;
+    PaddingMode paddingMode = data.m_Parameters.m_PaddingMode;
+
+    TensorShape outputShape = outputInfo.GetShape();
+    TensorShape inputShape  = inputInfo.GetShape();
+
+    unsigned int numOutputElements = outputInfo.GetNumElements();
+    unsigned int numInputDimensions = inputShape.GetNumDimensions();
+    assert(numInputDimensions == outputShape.GetNumDimensions());
+
+    // If padding mode is Reflect then both paddings must be no greater than inputShape(i) - 1.
+    // If padding mode is Symmetric then both paddings must be no greater than inputShape(i).
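+    // E.g. a dimension of size 3 allows at most 2 elements of padding per side with Reflect,
+    // or 3 elements per side with Symmetric.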
+    const unsigned int isReflect = static_cast<unsigned int>(paddingMode == PaddingMode::Reflect);
+    for(unsigned int i = 0; i < padList.size(); ++i)
+    {
+        if(padList.at(i).first > (inputShape[i] - isReflect) ||
+           padList.at(i).second > (inputShape[i] - isReflect))
+        {
+            throw armnn::InvalidArgumentException("Padding must be less than the dimension size "
+                                                  "(Reflect) or no greater than it (Symmetric).");
+        }
+    }
+
+    auto inputData = MakeDecoder<float>(inputInfo, inputHandle->Map());
+    auto outData   = MakeEncoder<float>(outputInfo, outputHandle->Map());
+
+    Decoder<float>& input  = *inputData;
+    Encoder<float>& output = *outData;
+
+    for(unsigned int idx = 0; idx < numOutputElements; ++idx)
+    {
+        // Get the coordinates of the current index in vector form. E.g. idx 1 = [0, 0, 0, 1].
+        const std::vector<unsigned int> coord = IndexToCoord(outputShape, idx);
+
+        std::vector<unsigned int> dimensions;
+        std::vector<unsigned int> coords;
+
+        for(unsigned int i = 0; i < numInputDimensions; ++i)
+        {
+            dimensions.emplace_back(i);
+            coords.emplace_back(coord[i]);
+        }
+
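+        // A coordinate lies in the padding if it falls before the start of the input region
+        // or beyond its end along dimension i.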
+        auto isInPadding = [&](unsigned int i)
+        {
+            return (coords[i] < padList[i].first || coords[i] > inputShape[i] + padList[i].first - 1);
+        };
+
+        auto getReflectIndex = [&](unsigned int i) -> unsigned int
+        {
+            if(isInPadding(i))
+            {
+                if(coords[i] < padList[i].first)
+                {
+                    return padList[i].first - coords[i];
+                }
+                else
+                {
+                    return 2 * inputShape[i] + padList[i].first - 2 - coords[i];
+                }
+            }
+            return coords[i] - padList[i].first;
+        };
+
+        auto getSymmetricIndex = [&](unsigned int i) -> unsigned int
+        {
+            if(isInPadding(i))
+            {
+                if(coords[i] < padList[i].first)
+                {
+                    return padList[i].first - coords[i] - 1;
+                }
+                else
+                {
+                    return 2 * inputShape[i] + padList[i].first - 1 - coords[i];
+                }
+            }
+            return coords[i] - padList[i].first;
+        };
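+
+        // E.g. with inputShape[i] = 3 and padList[i] = (1, 1): output position 0 maps to
+        // input index 1 (Reflect) or 0 (Symmetric), and output position 4 maps to
+        // input index 1 (Reflect) or 2 (Symmetric).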
+
+        // Location of the value in the input tensor to use in the output.
+        std::vector<unsigned int> coordOfInput;
+
+        // any_of works as a loop here to check if any of the dimensions are in the padding.
+        // If a dimension is in the padding area, then create the coordinates of the location in the
+        // input tensor to use in the output.
+        // E.g.
+        // Input tensor = [ 1, 2, 3 ], Rank = 1.
+        // Output tensor = [ 2, 1, 2, 3, 2 ] if Reflect or [ 1, 1, 2, 3, 3 ] if Symmetric with a padding of (1, 1).
+        // So it will either return [ 1 ] or [ 0 ], which is used to set the first value in the output tensor and so on.
+        if(std::any_of(dimensions.begin(), dimensions.end(), isInPadding))
+        {
+            switch(paddingMode)
+            {
+                case PaddingMode::Reflect:
+                {
+                    for(unsigned int i = 0; i < numInputDimensions; ++i)
+                    {
+                        coordOfInput.emplace_back(getReflectIndex(i));
+                    }
+                    break;
+                }
+                case PaddingMode::Symmetric:
+                {
+                    for(unsigned int i = 0; i < numInputDimensions; ++i)
+                    {
+                        coordOfInput.emplace_back(getSymmetricIndex(i));
+                    }
+                    break;
+                }
+                default:
+                    throw InvalidArgumentException("Padding mode not supported.");
+                    break;
+            }
+        }
+        else
+        {
+            for(unsigned int i = 0; i < numInputDimensions; ++i)
+            {
+                coordOfInput.emplace_back(coord[i] - padList[i].first);
+            }
+        }
+
+        // Set output value using the coordinate of the input value to use.
+        const unsigned int indexOfInput = CoordToIndex(inputShape, coordOfInput);
+
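+        // operator[] repositions the decoder/encoder on the chosen element; Get()/Set()
+        // then read or write the value at that position.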
+        input[indexOfInput];
+        auto inputValue = input.Get();
+
+        output[idx];
+        output.Set(inputValue);
+    }
+}
+
+} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/MirrorPad.hpp b/src/backends/reference/workloads/MirrorPad.hpp
new file mode 100644
index 0000000..3deaf1d
--- /dev/null
+++ b/src/backends/reference/workloads/MirrorPad.hpp
@@ -0,0 +1,22 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "armnn/Tensor.hpp"
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+void MirrorPad(const TensorInfo& inputInfo,
+               const TensorInfo& outputInfo,
+               const ITensorHandle* inputHandle,
+               ITensorHandle* outputHandle,
+               const PadQueueDescriptor& data);
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefPadWorkload.cpp b/src/backends/reference/workloads/RefPadWorkload.cpp
index f15306d..fd0728c 100644
--- a/src/backends/reference/workloads/RefPadWorkload.cpp
+++ b/src/backends/reference/workloads/RefPadWorkload.cpp
@@ -5,6 +5,7 @@
 
 #include "RefPadWorkload.hpp"
 
+#include "MirrorPad.hpp"
 #include "Pad.hpp"
 #include "Profiling.hpp"
 #include "RefWorkloadUtils.hpp"
@@ -29,11 +30,19 @@
     const TensorInfo& inputInfo  = GetTensorInfo(inputs[0]);
     const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
 
-    armnn::Pad(inputInfo,
-               outputInfo,
-               inputs[0],
-               outputs[0],
-               m_Data);
+    PaddingMode paddingMode = m_Data.m_Parameters.m_PaddingMode;
+    if (paddingMode == PaddingMode::Constant)
+    {
+        armnn::Pad(inputInfo, outputInfo, inputs[0], outputs[0], m_Data);
+    }
+    else if(paddingMode == PaddingMode::Reflect || paddingMode == PaddingMode::Symmetric)
+    {
+        armnn::MirrorPad(inputInfo, outputInfo, inputs[0], outputs[0], m_Data);
+    }
+    else
+    {
+        throw InvalidArgumentException("Padding mode not supported.");
+    }
 }
 
 } //namespace armnn
\ No newline at end of file