IVGCVSW-7623: GpuFsa Op: Add Pool2d operator

* Add Pool2d EndToEnd tests to all backends
* Add utility functions for the attributes in a separate file
* Remove some unnecessary includes
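
Illustrative sketch for reviewers (not part of the patch; ValidateMaxPool is
a hypothetical caller): the new UtilsGpuFsa helper converts an Arm NN
Pooling2dDescriptor into dynamic fusion Pool2dAttributes, which
GpuFsaPooling2dValidate then passes to GpuPool2d::validate_op:

    #include <armnn/Descriptors.hpp>
    #include "layers/GpuFsaPooling2d.hpp"

    arm_compute::Status ValidateMaxPool(const armnn::TensorInfo& input)
    {
        armnn::Pooling2dDescriptor desc;
        desc.m_PoolType      = armnn::PoolingAlgorithm::Max;
        desc.m_PoolWidth     = 2;
        desc.m_PoolHeight    = 2;
        desc.m_StrideX       = 1;
        desc.m_StrideY       = 1;
        // GpuFsa currently supports PaddingMethod::Exclude only
        desc.m_PaddingMethod = armnn::PaddingMethod::Exclude;
        desc.m_DataLayout    = armnn::DataLayout::NHWC;

        // Builds a GpuWorkloadSketch internally, converts desc via
        // CreatePool2dAttributes and returns the GpuPool2d::validate_op status
        return armnn::GpuFsaPooling2dValidate(input, desc);
    }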

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I0f82ebbf7b3301c6368462fb4fb4d4d02b246fc6
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
index e6c5a9b..a11b966 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include <aclCommon/ArmComputeTensorUtils.hpp>
@@ -276,12 +276,12 @@
     const arm_compute::DimensionRoundingType rounding = ConvertOutputShapeRoundingToAclDimensionRoundingType(
                                                                                     descriptor.m_OutputShapeRounding);
     const arm_compute::PadStrideInfo padStrideInfo(descriptor.m_StrideX,
-                                      descriptor.m_StrideY,
-                                      descriptor.m_PadLeft,
-                                      descriptor.m_PadRight,
-                                      descriptor.m_PadTop,
-                                      descriptor.m_PadBottom,
-                                      rounding);
+                                                   descriptor.m_StrideY,
+                                                   descriptor.m_PadLeft,
+                                                   descriptor.m_PadRight,
+                                                   descriptor.m_PadTop,
+                                                   descriptor.m_PadBottom,
+                                                   rounding);
 
     const bool excludePadding = (descriptor.m_PaddingMethod == PaddingMethod::Exclude);
 
diff --git a/src/backends/backendsCommon/test/Pooling2dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/Pooling2dEndToEndTestImpl.hpp
index 7a264e1..adff6e0 100644
--- a/src/backends/backendsCommon/test/Pooling2dEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/Pooling2dEndToEndTestImpl.hpp
@@ -76,7 +76,8 @@
 }
 
 template<armnn::DataType ArmnnType>
-void MaxPool2dEndToEndFloat16(const std::vector<armnn::BackendId>& backends)
+void MaxPool2dEndToEndFloat16(const std::vector<armnn::BackendId>& backends,
+                              PaddingMethod padMethod = PaddingMethod::Exclude)
 {
     using namespace half_float::literal;
     using Half = half_float::half;
@@ -84,7 +85,7 @@
     const TensorShape& inputShape = { 1, 3, 3, 1 };
     const TensorShape& outputShape = { 1, 3, 3, 1 };
 
-    INetworkPtr network = CreatePooling2dNetwork<ArmnnType>(inputShape, outputShape);
+    INetworkPtr network = CreatePooling2dNetwork<ArmnnType>(inputShape, outputShape, padMethod);
     CHECK(network);
 
     std::vector<Half> inputData{ 1._h, 2._h, 3._h,
@@ -140,7 +141,7 @@
 
 template<armnn::DataType ArmnnType>
 void AvgPool2dEndToEndFloat16(const std::vector<armnn::BackendId>& backends,
-                              PaddingMethod padMethod = PaddingMethod::IgnoreValue)
+                              PaddingMethod padMethod = PaddingMethod::Exclude)
 {
     using namespace half_float::literal;
     using Half = half_float::half;
@@ -155,9 +156,21 @@
     std::vector<Half> inputData{ 1._h, 2._h, 3._h,
                                  4._h, 5._h, 6._h,
                                  7._h, 8._h, 9._h };
-    std::vector<Half> expectedOutput{ 1.33333_h, 2.33333_h, 1.77778_h,
-                                      3._h     , 5._h     , 3.66667_h,
-                                      2.66667_h, 4.33333_h, 3.11111_h };
+    std::vector<Half> expectedOutput;
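+    // With Exclude, the average is taken over the valid elements only; with
+    // IgnoreValue, the zero padding is included in the divisor (full kernel size)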
+    if (padMethod == PaddingMethod::Exclude)
+    {
+        expectedOutput  = { 3._h , 3.5_h, 4._h ,
+                            4.5_h, 5._h , 5.5_h,
+                            6._h , 6.5_h, 7._h  };
+    }
+    else
+    {
+        expectedOutput  = { 1.33333_h, 2.33333_h, 1.77778_h,
+                            3._h     , 5._h     , 3.66667_h,
+                            2.66667_h, 4.33333_h, 3.11111_h };
+    }
 
     std::map<int, std::vector<Half>> inputTensorData = { { 0, inputData } };
     std::map<int, std::vector<Half>> expectedOutputData = { { 0, expectedOutput } };
diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp
index 878054f..3acd7dc 100644
--- a/src/backends/cl/test/ClEndToEndTests.cpp
+++ b/src/backends/cl/test/ClEndToEndTests.cpp
@@ -17,6 +17,7 @@
 #include <backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp>
 #include <backendsCommon/test/FillEndToEndTestImpl.hpp>
 #include <backendsCommon/test/InstanceNormalizationEndToEndTestImpl.hpp>
+#include "backendsCommon/test/Pooling2dEndToEndTestImpl.hpp"
 #include <backendsCommon/test/PreluEndToEndTestImpl.hpp>
 #include <backendsCommon/test/QLstmEndToEndTestImpl.hpp>
 #include <backendsCommon/test/QuantizedLstmEndToEndTestImpl.hpp>
@@ -245,8 +246,8 @@
                                                 0, 0, 0, 0,  0, 0, 0, 0 });
 
     ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends,
-                                                               ComparisonOperation::Greater,
-                                                               expectedOutput);
+                                                        ComparisonOperation::Greater,
+                                                        expectedOutput);
 }
 
 TEST_CASE("ClGreaterBroadcastEndToEndTest")
@@ -265,8 +266,8 @@
                                                 1, 1, 1, 1, 1, 1 });
 
     ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends,
-                                                                  ComparisonOperation::Greater,
-                                                                  expectedOutput);
+                                                           ComparisonOperation::Greater,
+                                                           expectedOutput);
 }
 
 // HardSwish
@@ -316,6 +317,49 @@
     InstanceNormalizationNchwEndToEndTest2(clDefaultBackends);
 }
 
+// Pooling 2D
+// Average Pool 2D
+TEST_CASE("ClAvgPool2DEndtoEndTestFloat32")
+{
+    AvgPool2dEndToEnd<DataType::Float32>(clDefaultBackends);
+}
+
+TEST_CASE("ClAvgPool2DEndtoEndTestFloat16")
+{
+    AvgPool2dEndToEndFloat16<DataType::Float16>(clDefaultBackends);
+}
+
+TEST_CASE("ClAvgPool2DIgnoreValueEndtoEndTestFloat32")
+{
+    AvgPool2dEndToEnd<DataType::Float32>(clDefaultBackends, PaddingMethod::IgnoreValue);
+}
+
+// Max Pool 2D
+TEST_CASE("ClMaxPool2DEndtoEndTestFloat32")
+{
+    MaxPool2dEndToEnd<DataType::Float32>(clDefaultBackends);
+}
+
+TEST_CASE("ClMaxPool2DEndtoEndTestFloat16")
+{
+    MaxPool2dEndToEndFloat16<DataType::Float16>(clDefaultBackends);
+}
+
+TEST_CASE("ClMaxPool2DIgnoreValueEndtoEndTestFloat32")
+{
+    MaxPool2dEndToEnd<DataType::Float32>(clDefaultBackends, PaddingMethod::IgnoreValue);
+}
+
+TEST_CASE("ClMaxPool2DTwoLayerEndtoEndTestFloat32")
+{
+    MaxPool2dTwoLayerEndToEnd<DataType::Float32>(clDefaultBackends);
+}
+
+TEST_CASE("ClMaxPool2DThreeLayerEndtoEndTestFloat32")
+{
+    MaxPool2dThreeLayerEndToEnd<DataType::Float32>(clDefaultBackends);
+}
+
 // Fill
 TEST_CASE("ClFillEndToEndTest")
 {
diff --git a/src/backends/gpuFsa/GpuFsaBackend.cpp b/src/backends/gpuFsa/GpuFsaBackend.cpp
index 8b62aec..f14687b 100644
--- a/src/backends/gpuFsa/GpuFsaBackend.cpp
+++ b/src/backends/gpuFsa/GpuFsaBackend.cpp
@@ -24,6 +24,7 @@
 #include "layers/GpuFsaDepthwiseConvolution2d.hpp"
 #include "layers/GpuFsaElementwiseBinaryAdd.hpp"
 #include "layers/GpuFsaElementwiseBinarySub.hpp"
+#include "layers/GpuFsaPooling2d.hpp"
 
 namespace armnn
 {
@@ -315,6 +316,13 @@
                 }
                 break;
             }
+            case (LayerType::Pooling2d):
+            {
+                auto input = base.GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
+                auto desc = PolymorphicDowncast<const Pooling2dDescriptor*>(&base.GetParameters());
+                GpuFsaPooling2dCreateOp(preCompiledBlobPtr, input, *desc);
+                break;
+            }
             default:
                 // unsupported layer for GpuFsa backend
                 continue;
diff --git a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
index 2e5c7d5..b73b3e9 100644
--- a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
+++ b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
@@ -14,6 +14,7 @@
 #include "layers/GpuFsaDepthwiseConvolution2d.hpp"
 #include "layers/GpuFsaElementwiseBinaryAdd.hpp"
 #include "layers/GpuFsaElementwiseBinarySub.hpp"
+#include "layers/GpuFsaPooling2d.hpp"
 #endif
 
 #include <vector>
@@ -156,7 +157,22 @@
             {
                 throw InvalidArgumentException("Invalid ElementwiseBinary BinaryOperation operation.");
             }
-            return false;
+        }
+        case LayerType::Pooling2d:
+        {
+            if (infos.size() != 2)
+            {
+                throw InvalidArgumentException("Invalid number of Pooling2d TensorInfos. "
+                                               "TensorInfos should be of format: {input, output}.");
+            }
+
+            auto desc = PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor);
+
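+            // Pooling2d validation only needs the input TensorInfo; the output shape is inferred by the sketch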
+            FORWARD_LAYER_VALIDATE_FUNC(GpuFsaPooling2dValidate,
+                                        reasonIfUnsupported,
+                                        infos[0],
+                                        *desc);
         }
         case LayerType::Constant:
         case LayerType::Input:
diff --git a/src/backends/gpuFsa/backend.cmake b/src/backends/gpuFsa/backend.cmake
index 1647333..67e9be7 100644
--- a/src/backends/gpuFsa/backend.cmake
+++ b/src/backends/gpuFsa/backend.cmake
@@ -1,5 +1,5 @@
 #
-# Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
 # SPDX-License-Identifier: MIT
 #
 
@@ -7,7 +7,7 @@
 list(APPEND armnnLibraries armnnGpuFsaBackend)
 
 if(ARMCOMPUTEGPUFSA)
-    list(APPEND armnnLibraries armnnGpuFsaBackendLayerValidators)
+    list(APPEND armnnLibraries armnnGpuFsaBackendLayers)
     list(APPEND armnnLibraries armnnGpuFsaBackendWorkloads)
     list(APPEND armnnUnitTestLibraries armnnGpuFsaBackendUnitTests)
 else()
diff --git a/src/backends/gpuFsa/layers/CMakeLists.txt b/src/backends/gpuFsa/layers/CMakeLists.txt
index 182a32c..5e0d0e7 100644
--- a/src/backends/gpuFsa/layers/CMakeLists.txt
+++ b/src/backends/gpuFsa/layers/CMakeLists.txt
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: MIT
 #
 
-list(APPEND armnnGpuFsaBackendLayerValidators_sources
+list(APPEND armnnGpuFsaBackendLayers_sources
         GpuFsaConvolution2d.cpp
         GpuFsaConvolution2d.hpp
         GpuFsaDepthwiseConvolution2d.cpp
@@ -12,9 +12,13 @@
         GpuFsaElementwiseBinaryAdd.hpp
         GpuFsaElementwiseBinarySub.cpp
         GpuFsaElementwiseBinarySub.hpp
+        GpuFsaPooling2d.cpp
+        GpuFsaPooling2d.hpp
+        UtilsGpuFsa.cpp
+        UtilsGpuFsa.hpp
     )
 
-add_library(armnnGpuFsaBackendLayerValidators OBJECT ${armnnGpuFsaBackendLayerValidators_sources})
-target_include_directories(armnnGpuFsaBackendLayerValidators PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
-target_include_directories(armnnGpuFsaBackendLayerValidators PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
-target_include_directories(armnnGpuFsaBackendLayerValidators PRIVATE ${PROJECT_SOURCE_DIR}/src/backends)
+add_library(armnnGpuFsaBackendLayers OBJECT ${armnnGpuFsaBackendLayers_sources})
+target_include_directories(armnnGpuFsaBackendLayers PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
+target_include_directories(armnnGpuFsaBackendLayers PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
+target_include_directories(armnnGpuFsaBackendLayers PRIVATE ${PROJECT_SOURCE_DIR}/src/backends)
diff --git a/src/backends/gpuFsa/layers/GpuFsaConvolution2d.cpp b/src/backends/gpuFsa/layers/GpuFsaConvolution2d.cpp
index 90b0ddc..e940963 100644
--- a/src/backends/gpuFsa/layers/GpuFsaConvolution2d.cpp
+++ b/src/backends/gpuFsa/layers/GpuFsaConvolution2d.cpp
@@ -4,29 +4,23 @@
 //
 
 #include "GpuFsaConvolution2d.hpp"
-
-//#include <armnn/Types.hpp>
+#include "UtilsGpuFsa.hpp"
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 
-//#include <arm_compute/core/ITensorInfo.h>
-//#include <arm_compute/core/TensorInfo.h>
-//#include <arm_compute/core/TensorShape.h>
-//#include <arm_compute/core/CL/CLKernelLibrary.h>
-//#include <arm_compute/core/CL/CLCompileContext.h>
-
-//#include <arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h>
 #include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
 #include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuConv2d.h>
 #include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
 
 #include <vector>
 
+using namespace arm_compute::experimental::dynamic_fusion;
+using namespace armnn::armcomputetensorutils;
+
 namespace armnn
 {
 
-using namespace armcomputetensorutils;
-
 arm_compute::Status GpuFsaConvolution2dValidate(const TensorInfo& input,
                                                 const Convolution2dDescriptor& descriptor,
                                                 const TensorInfo& weights,
@@ -61,23 +55,14 @@
         biasSketchInfoPtr = workloadContext.create_tensor_info(aclBiasInfo);
     }
 
-    // Set Conv2d attributes using descriptor
-    const arm_compute::Size2D    aclDilationInfo = BuildArmComputeSize2D(descriptor.m_DilationX,
-                                                                         descriptor.m_DilationY);
-    const arm_compute::Padding2D aclPadInfo      = BuildArmComputePaddingInfo(descriptor);
-    const arm_compute::Size2D    aclStrideInfo   = BuildArmComputeSize2D(descriptor.m_StrideX, descriptor.m_StrideY);
-
-    Conv2dAttributes conv2DAttributes{};
-    conv2DAttributes.dilation(aclDilationInfo);
-    conv2DAttributes.pad(aclPadInfo);
-    conv2DAttributes.stride(aclStrideInfo);
+    Conv2dAttributes conv2dAttributes = CreateConv2dAttributes(descriptor);
 
     // Validate operator, check status and update reasonIfUnsupported
     arm_compute::Status aclStatus = GpuConv2d::validate_op(sketch,
                                                            inputInfo,
                                                            weightInfo,
                                                            biasSketchInfoPtr,
-                                                           conv2DAttributes);
+                                                           conv2dAttributes);
 
     return aclStatus;
 }
@@ -99,7 +84,6 @@
  * as the TensorInfos used when creating the Tensors must match those used to create the Sketch. Otherwise the runtime
  * doesn't know which Tensors to use.
  */
-    using namespace arm_compute::experimental::dynamic_fusion;
     GpuWorkloadSketch* sketch = blob->sketch.get();
     GpuWorkloadContext* workloadContext = blob->workloadContext.get();
     std::vector<arm_compute::ITensorInfo*> inputTensorInfos = {};
@@ -130,23 +114,14 @@
         biasSketchInfoPtr = inputTensorInfos[2];
     }
 
-    // Set Conv2d attributes using descriptor
-    const arm_compute::Size2D    aclDilationInfo = BuildArmComputeSize2D(descriptor.m_DilationX,
-                                                                         descriptor.m_DilationY);
-    const arm_compute::Padding2D aclPadInfo      = BuildArmComputePaddingInfo(descriptor);
-    const arm_compute::Size2D    aclStrideInfo   = BuildArmComputeSize2D(descriptor.m_StrideX, descriptor.m_StrideY);
-
-    Conv2dAttributes conv2DAttributes{};
-    conv2DAttributes.dilation(aclDilationInfo);
-    conv2DAttributes.pad(aclPadInfo);
-    conv2DAttributes.stride(aclStrideInfo);
+    Conv2dAttributes conv2dAttributes = CreateConv2dAttributes(descriptor);
 
     // Validate operator, check status and update reasonIfUnsupported
     arm_compute::Status aclStatus = GpuConv2d::validate_op(*sketch,
                                                            inputTensorInfos[0],
                                                            inputTensorInfos[1],
                                                            biasSketchInfoPtr,
-                                                           conv2DAttributes);
+                                                           conv2dAttributes);
 
     const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
     if (!supported)
@@ -159,7 +134,7 @@
                                                                  inputTensorInfos[0],
                                                                  inputTensorInfos[1],
                                                                  biasSketchInfoPtr,
-                                                                 conv2DAttributes);
+                                                                 conv2dAttributes);
 
     // Create the Output
     outputTensorInfos.emplace_back(workloadContext->create_tensor_info());
diff --git a/src/backends/gpuFsa/layers/GpuFsaConvolution2d.hpp b/src/backends/gpuFsa/layers/GpuFsaConvolution2d.hpp
index 424ba41..55067f0 100644
--- a/src/backends/gpuFsa/layers/GpuFsaConvolution2d.hpp
+++ b/src/backends/gpuFsa/layers/GpuFsaConvolution2d.hpp
@@ -10,9 +10,6 @@
 
 namespace armnn
 {
-
-using namespace arm_compute::experimental::dynamic_fusion;
-
 arm_compute::Status GpuFsaConvolution2dValidate(const TensorInfo& input,
                                                 const Convolution2dDescriptor& descriptor,
                                                 const TensorInfo& weights,
diff --git a/src/backends/gpuFsa/layers/GpuFsaDepthwiseConvolution2d.cpp b/src/backends/gpuFsa/layers/GpuFsaDepthwiseConvolution2d.cpp
index a3c3dd9..21077af 100644
--- a/src/backends/gpuFsa/layers/GpuFsaDepthwiseConvolution2d.cpp
+++ b/src/backends/gpuFsa/layers/GpuFsaDepthwiseConvolution2d.cpp
@@ -4,22 +4,25 @@
 //
 
 #include "GpuFsaDepthwiseConvolution2d.hpp"
+#include "UtilsGpuFsa.hpp"
+
 #include <backendsCommon/WorkloadUtils.hpp>
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 
-#include <arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h>
 #include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
 #include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.h>
 #include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
 
 #include <vector>
 
+using namespace arm_compute::experimental::dynamic_fusion;
+using namespace armnn::armcomputetensorutils;
+
 namespace armnn
 {
 
-using namespace armcomputetensorutils;
-
 arm_compute::Status GpuFsaDepthwiseConvolution2dValidate(const TensorInfo& input,
                                                          const DepthwiseConvolution2dDescriptor& descriptor,
                                                          const TensorInfo& weights,
@@ -71,17 +74,7 @@
         biasSketchInfoPtr = workloadContext.create_tensor_info(aclBiasInfo);
     }
 
-    // Set DepthwiseConv2d attributes using descriptor
-    const arm_compute::Size2D    aclDilationInfo = BuildArmComputeSize2D(descriptor.m_DilationX,
-                                                                         descriptor.m_DilationY);
-    const arm_compute::Padding2D aclPadInfo      = BuildArmComputePaddingInfo(descriptor);
-    const arm_compute::Size2D    aclStrideInfo   = BuildArmComputeSize2D(descriptor.m_StrideX, descriptor.m_StrideY);
-
-    DepthwiseConv2dAttributes depthwiseConv2dAttributes{};
-    depthwiseConv2dAttributes.pad(aclPadInfo);
-    depthwiseConv2dAttributes.stride(aclStrideInfo);
-    depthwiseConv2dAttributes.dilation(aclDilationInfo);
-    depthwiseConv2dAttributes.depth_multiplier(aclDepthMultiplier);
+    DepthwiseConv2dAttributes depthwiseConv2dAttributes = CreateDWConv2dAttributes(descriptor, aclDepthMultiplier);
 
     // Validate operator, check status and update reasonIfUnsupported
     arm_compute::Status aclStatus = GpuDepthwiseConv2d::validate_op(sketch,
@@ -110,7 +103,6 @@
 * as the TensorInfos used when creating the Tensors must match those used to create the Sketch. Otherwise the runtime
 * doesn't know which Tensors to use.
 */
-    using namespace arm_compute::experimental::dynamic_fusion;
     GpuWorkloadSketch* sketch = blob->sketch.get();
     GpuWorkloadContext* workloadContext = blob->workloadContext.get();
     std::vector<arm_compute::ITensorInfo*> inputTensorInfos = {};
@@ -157,17 +149,7 @@
         biasSketchInfoPtr = inputTensorInfos[2];
     }
 
-    // Set DepthwiseConv2d attributes using descriptor
-    const arm_compute::Size2D    aclDilationInfo = BuildArmComputeSize2D(descriptor.m_DilationX,
-                                                                         descriptor.m_DilationY);
-    const arm_compute::Padding2D aclPadInfo      = BuildArmComputePaddingInfo(descriptor);
-    const arm_compute::Size2D    aclStrideInfo   = BuildArmComputeSize2D(descriptor.m_StrideX, descriptor.m_StrideY);
-
-    DepthwiseConv2dAttributes depthwiseConv2dAttributes{};
-    depthwiseConv2dAttributes.pad(aclPadInfo);
-    depthwiseConv2dAttributes.stride(aclStrideInfo);
-    depthwiseConv2dAttributes.dilation(aclDilationInfo);
-    depthwiseConv2dAttributes.depth_multiplier(aclDepthMultiplier);
+    DepthwiseConv2dAttributes depthwiseConv2dAttributes = CreateDWConv2dAttributes(descriptor, aclDepthMultiplier);
 
     // Validate operator, check status and update reasonIfUnsupported
     arm_compute::Status aclStatus = GpuDepthwiseConv2d::validate_op(*sketch,
diff --git a/src/backends/gpuFsa/layers/GpuFsaDepthwiseConvolution2d.hpp b/src/backends/gpuFsa/layers/GpuFsaDepthwiseConvolution2d.hpp
index d3e562d..924d1d3 100644
--- a/src/backends/gpuFsa/layers/GpuFsaDepthwiseConvolution2d.hpp
+++ b/src/backends/gpuFsa/layers/GpuFsaDepthwiseConvolution2d.hpp
@@ -10,9 +10,6 @@
 
 namespace armnn
 {
-
-using namespace arm_compute::experimental::dynamic_fusion;
-
 arm_compute::Status GpuFsaDepthwiseConvolution2dValidate(const TensorInfo& input,
                                                          const DepthwiseConvolution2dDescriptor& descriptor,
                                                          const TensorInfo& weights,
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.cpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.cpp
index fa016a6..d6404dd 100644
--- a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.cpp
+++ b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.cpp
@@ -7,12 +7,13 @@
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 
-#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h>
-#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
 #include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
 #include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
 
 using namespace arm_compute::experimental::dynamic_fusion;
+using namespace armnn::armcomputetensorutils;
 
 namespace armnn
 {
@@ -20,8 +21,6 @@
 arm_compute::Status GpuFsaElementwiseBinaryAddValidate(const TensorInfo& input0,
                                                        const TensorInfo& input1)
 {
-    using namespace armcomputetensorutils;
-
     // Create a new workload sketch, for validation purposes
     auto compileCtx         = arm_compute::CLKernelLibrary::get().get_compile_context();
     auto workloadContext    = GpuWorkloadContext(&compileCtx);
@@ -43,8 +42,6 @@
                                         const TensorInfo& input0,
                                         const TensorInfo& input1)
 {
-    using namespace armcomputetensorutils;
-
     GpuWorkloadSketch* sketch           = blob->sketch.get();
     GpuWorkloadContext* workloadContext = blob->workloadContext.get();
     std::vector<arm_compute::ITensorInfo*> inputTensorInfos  = {};
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.hpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.hpp
index 73f1fcb..1392d01 100644
--- a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.hpp
+++ b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.hpp
@@ -10,14 +10,11 @@
 
 namespace armnn
 {
+arm_compute::Status GpuFsaElementwiseBinaryAddValidate(const TensorInfo& input0,
+                                                       const TensorInfo& input1);
 
-    using namespace arm_compute::experimental::dynamic_fusion;
-
-    arm_compute::Status GpuFsaElementwiseBinaryAddValidate(const TensorInfo& input0,
-                                                           const TensorInfo& input1);
-
-    void GpuFsaElementwiseBinaryAddCreateOp(GpuFsaPreCompiledBlob* blob,
-                                            const TensorInfo& input0,
-                                            const TensorInfo& input1);
+void GpuFsaElementwiseBinaryAddCreateOp(GpuFsaPreCompiledBlob* blob,
+                                        const TensorInfo& input0,
+                                        const TensorInfo& input1);
 
 } // namespace armnn
\ No newline at end of file
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.cpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.cpp
index 4e7eb77..5e0f478 100644
--- a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.cpp
+++ b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.cpp
@@ -7,12 +7,13 @@
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 
-#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuSub.h>
-#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
 #include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
 #include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuSub.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
 
 using namespace arm_compute::experimental::dynamic_fusion;
+using namespace armnn::armcomputetensorutils;
 
 namespace armnn
 {
@@ -20,8 +21,6 @@
 arm_compute::Status GpuFsaElementwiseBinarySubValidate(const TensorInfo& input0,
                                                        const TensorInfo& input1)
 {
-    using namespace armcomputetensorutils;
-
     // Create a new workload sketch, for validation purposes
     auto compileCtx         = arm_compute::CLKernelLibrary::get().get_compile_context();
     auto workloadContext    = GpuWorkloadContext(&compileCtx);
@@ -43,8 +42,6 @@
                                         const TensorInfo& input0,
                                         const TensorInfo& input1)
 {
-    using namespace armcomputetensorutils;
-
     GpuWorkloadSketch* sketch           = blob->sketch.get();
     GpuWorkloadContext* workloadContext = blob->workloadContext.get();
     std::vector<arm_compute::ITensorInfo*> inputTensorInfos  = {};
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.hpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.hpp
index 59d8189..4d58f31 100644
--- a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.hpp
+++ b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.hpp
@@ -2,7 +2,6 @@
 // Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
-
 #pragma once
 
 #include <armnn/Descriptors.hpp>
@@ -11,13 +10,11 @@
 
 namespace armnn
 {
+arm_compute::Status GpuFsaElementwiseBinarySubValidate(const TensorInfo& input0,
+                                                       const TensorInfo& input1);
 
-    using namespace arm_compute::experimental::dynamic_fusion;
+void GpuFsaElementwiseBinarySubCreateOp(GpuFsaPreCompiledBlob* blob,
+                                        const TensorInfo& input0,
+                                        const TensorInfo& input1);
 
-    arm_compute::Status GpuFsaElementwiseBinarySubValidate(const TensorInfo& input0,
-                                                           const TensorInfo& input1);
-
-    void GpuFsaElementwiseBinarySubCreateOp(GpuFsaPreCompiledBlob* blob,
-                                            const TensorInfo& input0,
-                                            const TensorInfo& input1);
-}
\ No newline at end of file
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/gpuFsa/layers/GpuFsaPooling2d.cpp b/src/backends/gpuFsa/layers/GpuFsaPooling2d.cpp
new file mode 100644
index 0000000..4575d21
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaPooling2d.cpp
@@ -0,0 +1,84 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GpuFsaPooling2d.hpp"
+#include "UtilsGpuFsa.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuPool2d.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
+
+using namespace arm_compute::experimental::dynamic_fusion;
+using namespace armnn::armcomputetensorutils;
+
+namespace armnn
+{
+
+arm_compute::Status GpuFsaPooling2dValidate(const TensorInfo& input,
+                                            const Pooling2dDescriptor& descriptor)
+{
+    // Create a new workload sketch, for validation purposes
+    auto compileCtx         = arm_compute::CLKernelLibrary::get().get_compile_context();
+    auto workloadContext    = GpuWorkloadContext(&compileCtx);
+    GpuWorkloadSketch sketch{ &workloadContext };
+
+    arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+    aclInputInfo.set_are_values_constant(input.IsConstant());
+    arm_compute::ITensorInfo* inputInfo = workloadContext.create_tensor_info(aclInputInfo);
+
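+    // Build the dynamic fusion attributes (pool type, padding, pool size, stride) from the Arm NN descriptor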
+    Pool2dAttributes pool2dAttributes = CreatePool2dAttributes(descriptor);
+    GpuPool2dSettings pool2dSettings{};
+
+    return GpuPool2d::validate_op(sketch, inputInfo, pool2dAttributes, pool2dSettings);
+}
+
+void GpuFsaPooling2dCreateOp(GpuFsaPreCompiledBlob* blob,
+                             const TensorInfo& input,
+                             const Pooling2dDescriptor& descriptor)
+{
+    GpuWorkloadSketch* sketch           = blob->sketch.get();
+    GpuWorkloadContext* workloadContext = blob->workloadContext.get();
+    std::vector<arm_compute::ITensorInfo*> inputTensorInfos  = {};
+    std::vector<arm_compute::ITensorInfo*> outputTensorInfos = {};
+
+    arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+    aclInputInfo.set_are_values_constant(input.IsConstant());
+
+    inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInputInfo));
+
+    Pool2dAttributes pool2dAttributes = CreatePool2dAttributes(descriptor);
+    GpuPool2dSettings pool2dSettings{};
+
+    // Validate the operator and check the returned status
+    arm_compute::Status aclStatus = GpuPool2d::validate_op(*sketch,
+                                                           inputTensorInfos[0],
+                                                           pool2dAttributes,
+                                                           pool2dSettings);
+
+    const bool supported = aclStatus.error_code() == arm_compute::ErrorCode::OK;
+    if (!supported)
+    {
+        throw BackendCapabilityException("\"GpuFsa\" backend failed during pooling 2d validation");
+    }
+
+    arm_compute::ITensorInfo* poolOutputInfo = GpuPool2d::create_op(*sketch,
+                                                                    inputTensorInfos[0],
+                                                                    pool2dAttributes,
+                                                                    pool2dSettings);
+
+    // Temporary fix until a fusing attempt is made for the GpuFsa backend and the Output layer workload is created.
+    outputTensorInfos.emplace_back(workloadContext->create_tensor_info());
+    GpuOutput::create_op(*sketch, poolOutputInfo, outputTensorInfos[0]);
+
+    // Store the TensorInfos within the blob as unique_ptrs to be used later
+    blob->inputTensorInfos  = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(inputTensorInfos);
+    blob->outputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(outputTensorInfos);
+}
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/gpuFsa/layers/GpuFsaPooling2d.hpp b/src/backends/gpuFsa/layers/GpuFsaPooling2d.hpp
new file mode 100644
index 0000000..25f6e72
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaPooling2d.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/Descriptors.hpp>
+
+#include <gpuFsa/GpuFsaBackend.hpp>
+
+namespace armnn
+{
+arm_compute::Status GpuFsaPooling2dValidate(const TensorInfo& input,
+                                            const Pooling2dDescriptor& descriptor);
+
+void GpuFsaPooling2dCreateOp(GpuFsaPreCompiledBlob* blob,
+                             const TensorInfo& input,
+                             const Pooling2dDescriptor& descriptor);
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/gpuFsa/layers/UtilsGpuFsa.cpp b/src/backends/gpuFsa/layers/UtilsGpuFsa.cpp
new file mode 100644
index 0000000..a1d96f0
--- /dev/null
+++ b/src/backends/gpuFsa/layers/UtilsGpuFsa.cpp
@@ -0,0 +1,62 @@
+//
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "UtilsGpuFsa.hpp"
+#include "aclCommon/ArmComputeTensorUtils.hpp"
+#include "aclCommon/ArmComputeUtils.hpp"
+
+using namespace armnn;
+using namespace armnn::armcomputetensorutils;
+using namespace arm_compute::experimental::dynamic_fusion;
+
+Conv2dAttributes CreateConv2dAttributes(const Convolution2dDescriptor& descriptor)
+{
+    const arm_compute::Padding2D padInfo      = BuildArmComputePaddingInfo(descriptor);
+    const arm_compute::Size2D    strideInfo   = BuildArmComputeSize2D(descriptor.m_StrideX, descriptor.m_StrideY);
+    const arm_compute::Size2D    dilationInfo = BuildArmComputeSize2D(descriptor.m_DilationX, descriptor.m_DilationY);
+
+    arm_compute::experimental::dynamic_fusion::Conv2dAttributes conv2dAttributes{};
+    conv2dAttributes.pad(padInfo);
+    conv2dAttributes.stride(strideInfo);
+    conv2dAttributes.dilation(dilationInfo);
+
+    return conv2dAttributes;
+}
+
+arm_compute::experimental::dynamic_fusion::DepthwiseConv2dAttributes
+CreateDWConv2dAttributes(const DepthwiseConvolution2dDescriptor& descriptor, const unsigned int aclDepthMultiplier)
+{
+    const arm_compute::Padding2D padInfo      = BuildArmComputePaddingInfo(descriptor);
+    const arm_compute::Size2D    strideInfo   = BuildArmComputeSize2D(descriptor.m_StrideX, descriptor.m_StrideY);
+    const arm_compute::Size2D    dilationInfo = BuildArmComputeSize2D(descriptor.m_DilationX, descriptor.m_DilationY);
+
+    arm_compute::experimental::dynamic_fusion::DepthwiseConv2dAttributes depthwiseConv2dAttributes{};
+    depthwiseConv2dAttributes.pad(padInfo);
+    depthwiseConv2dAttributes.stride(strideInfo);
+    depthwiseConv2dAttributes.dilation(dilationInfo);
+    depthwiseConv2dAttributes.depth_multiplier(aclDepthMultiplier);
+
+    return depthwiseConv2dAttributes;
+}
+
+arm_compute::experimental::dynamic_fusion::Pool2dAttributes
+CreatePool2dAttributes(const Pooling2dDescriptor& descriptor)
+{
+    const arm_compute::PoolingType poolType = ConvertPoolingAlgorithmToAclPoolingType(descriptor.m_PoolType);
+    const arm_compute::Padding2D   padding  = BuildArmComputePaddingInfo(descriptor);
+    const arm_compute::Size2D      poolSize = BuildArmComputeSize2D(descriptor.m_PoolWidth, descriptor.m_PoolHeight);
+    const arm_compute::Size2D      strides  = BuildArmComputeSize2D(descriptor.m_StrideX, descriptor.m_StrideY);
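+    // GpuFsa currently requires exclude_padding to be true; PaddingMethod::IgnoreValue fails backend validation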
+    const bool excludePadding = (descriptor.m_PaddingMethod == PaddingMethod::Exclude);
+
+    arm_compute::experimental::dynamic_fusion::Pool2dAttributes pool2dAttributes{};
+    pool2dAttributes.pool_type(poolType);
+    pool2dAttributes.pad(padding);
+    pool2dAttributes.pool_size(poolSize);
+    pool2dAttributes.stride(strides);
+    pool2dAttributes.exclude_padding(excludePadding);
+
+    return pool2dAttributes;
+}
\ No newline at end of file
diff --git a/src/backends/gpuFsa/layers/UtilsGpuFsa.hpp b/src/backends/gpuFsa/layers/UtilsGpuFsa.hpp
new file mode 100644
index 0000000..6c1d97a
--- /dev/null
+++ b/src/backends/gpuFsa/layers/UtilsGpuFsa.hpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "armnn/Descriptors.hpp"
+#include "arm_compute/dynamic_fusion/sketch/attributes/Conv2dAttributes.h"
+#include "arm_compute/dynamic_fusion/sketch/attributes/DepthwiseConv2dAttributes.h"
+#include "arm_compute/dynamic_fusion/sketch/attributes/Pool2dAttributes.h"
+
+/// Utility function used to set up an arm_compute::Conv2dAttributes object from the given descriptor
+/// @param[in] armnn::Convolution2dDescriptor
+/// @return arm_compute::experimental::dynamic_fusion::Conv2dAttributes
+arm_compute::experimental::dynamic_fusion::Conv2dAttributes
+CreateConv2dAttributes(const armnn::Convolution2dDescriptor& descriptor);
+
+/// Utility function used to set up an arm_compute::DepthwiseConv2dAttributes object from the given descriptor
+/// @param[in] armnn::DepthwiseConvolution2dDescriptor
+/// @return arm_compute::experimental::dynamic_fusion::DepthwiseConv2dAttributes
+arm_compute::experimental::dynamic_fusion::DepthwiseConv2dAttributes
+CreateDWConv2dAttributes(const armnn::DepthwiseConvolution2dDescriptor& descriptor,
+                         const unsigned int aclDepthMultiplier);
+
+/// Utility function used to set up an arm_compute::Pool2dAttributes object from the given descriptor
+/// @param[in] armnn::Pooling2dDescriptor
+/// @return arm_compute::experimental::dynamic_fusion::Pool2dAttributes
+arm_compute::experimental::dynamic_fusion::Pool2dAttributes
+CreatePool2dAttributes(const armnn::Pooling2dDescriptor& descriptor);
diff --git a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
index 26c7cb8..93a4a81 100644
--- a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
@@ -9,6 +9,7 @@
 
 #include "backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp"
 #include "backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp"
+#include "backendsCommon/test/Pooling2dEndToEndTestImpl.hpp"
 
 #include <doctest/doctest.h>
 
@@ -56,4 +57,55 @@
     ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float16>(gpuFsaDefaultBackends, BinaryOperation::Sub);
 }
 
+// Pooling 2D
+// Average Pool 2D
+TEST_CASE("GpuFsaAvgPool2DEndtoEndTestFloat32")
+{
+    AvgPool2dEndToEnd<DataType::Float32>(gpuFsaDefaultBackends);
+}
+
+TEST_CASE("GpuFsaAvgPool2DEndtoEndTestFloat16")
+{
+    AvgPool2dEndToEndFloat16<DataType::Float16>(gpuFsaDefaultBackends);
+}
+
+TEST_CASE("UNSUPPORTED_GpuFsaAvgPool2DIgnoreValueEndtoEndTestFloat32")
+{
+    // Exclude padding must be set to true in the Pool2dAttributes for the GpuFsa backend to support this pooling
+    try
+    {
+        AvgPool2dEndToEnd<DataType::Float32>(gpuFsaDefaultBackends, PaddingMethod::IgnoreValue);
+        FAIL("An exception should have been thrown");
+    }
+    catch (const armnn::InvalidArgumentException& e)
+    {
+        CHECK(strcmp(e.what(), "Failed to assign a backend to each layer") == 0);
+    }
+}
+
+// Max Pool 2D
+TEST_CASE("GpuFsaMaxPool2DEndtoEndTestFloat32")
+{
+    MaxPool2dEndToEnd<DataType::Float32>(gpuFsaDefaultBackends);
+}
+
+TEST_CASE("GpuFsaMaxPool2DEndtoEndTestFloat16")
+{
+    MaxPool2dEndToEndFloat16<DataType::Float16>(gpuFsaDefaultBackends);
+}
+
+TEST_CASE("UNSUPPORTED_GpuFsaMaxPool2DIgnoreValueEndtoEndTestFloat32")
+{
+    // Exclude padding must be set to true in the Pool2dAttributes for the GpuFsa backend to support this pooling
+    try
+    {
+        MaxPool2dEndToEnd<DataType::Float32>(gpuFsaDefaultBackends, PaddingMethod::IgnoreValue);
+        FAIL("An exception should have been thrown");
+    }
+    catch (const armnn::InvalidArgumentException& e)
+    {
+        CHECK(strcmp(e.what(), "Failed to assign a backend to each layer") == 0);
+    }
+}
+
 }
diff --git a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
index 9d4b3b9..fee0d07 100644
--- a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
@@ -101,4 +101,34 @@
     CHECK(supported);
 }
 
+TEST_CASE("IsLayerSupportedGpuFsaPooling2d")
+{
+    TensorInfo inputInfo({ 1, 3, 4, 1 }, DataType::Float32);
+    TensorInfo outputInfo({ 1, 2, 2, 1 }, DataType::Float32);
+
+    Pooling2dDescriptor desc{};
+    desc.m_PoolType   = PoolingAlgorithm::Max;
+    desc.m_PadLeft    = 0;
+    desc.m_PadRight   = 0;
+    desc.m_PadTop     = 0;
+    desc.m_PadBottom  = 0;
+    desc.m_PoolWidth  = 2;
+    desc.m_PoolHeight = 2;
+    desc.m_StrideX    = 1;
+    desc.m_StrideY    = 1;
+    desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
+    desc.m_PaddingMethod = PaddingMethod::Exclude;
+    desc.m_DataLayout  = DataLayout::NHWC;
+
+    GpuFsaLayerSupport supportChecker;
+    std::string reasonIfNotSupported;
+    auto supported = supportChecker.IsLayerSupported(LayerType::Pooling2d,
+                                                     {inputInfo, outputInfo},
+                                                     desc,
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
+                                                     reasonIfNotSupported);
+    CHECK(supported);
+}
+
 }
\ No newline at end of file
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index 37f6d38..1bf9344 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -18,6 +18,7 @@
 #include <backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp>
 #include <backendsCommon/test/FillEndToEndTestImpl.hpp>
 #include <backendsCommon/test/InstanceNormalizationEndToEndTestImpl.hpp>
+#include "backendsCommon/test/Pooling2dEndToEndTestImpl.hpp"
 #include <backendsCommon/test/PreluEndToEndTestImpl.hpp>
 #include <backendsCommon/test/QLstmEndToEndTestImpl.hpp>
 #include <backendsCommon/test/QuantizedLstmEndToEndTestImpl.hpp>
@@ -552,6 +553,49 @@
     InstanceNormalizationNchwEndToEndTest2(neonDefaultBackends);
 }
 
+// Pooling 2D
+// Average Pool 2D
+TEST_CASE("NeonAvgPool2DEndtoEndTestFloat32")
+{
+    AvgPool2dEndToEnd<DataType::Float32>(neonDefaultBackends);
+}
+
+TEST_CASE("NeonAvgPool2DEndtoEndTestFloat16")
+{
+    AvgPool2dEndToEndFloat16<DataType::Float16>(neonDefaultBackends);
+}
+
+TEST_CASE("NeonAvgPool2DIgnoreValueEndtoEndTestFloat32")
+{
+    AvgPool2dEndToEnd<DataType::Float32>(neonDefaultBackends, PaddingMethod::IgnoreValue);
+}
+
+// Max Pool 2D
+TEST_CASE("NeonMaxPool2DEndtoEndTestFloat32")
+{
+    MaxPool2dEndToEnd<DataType::Float32>(neonDefaultBackends);
+}
+
+TEST_CASE("NeonMaxPool2DEndtoEndTestFloat16")
+{
+    MaxPool2dEndToEndFloat16<DataType::Float16>(neonDefaultBackends);
+}
+
+TEST_CASE("NeonMaxPool2DIgnoreValueEndtoEndTestFloat32")
+{
+    MaxPool2dEndToEnd<DataType::Float32>(neonDefaultBackends, PaddingMethod::IgnoreValue);
+}
+
+TEST_CASE("NeonMaxPool2DTwoLayerEndtoEndTestFloat32")
+{
+    MaxPool2dTwoLayerEndToEnd<DataType::Float32>(neonDefaultBackends);
+}
+
+TEST_CASE("NeonMaxPool2DThreeLayerEndtoEndTestFloat32")
+{
+    MaxPool2dThreeLayerEndToEnd<DataType::Float32>(neonDefaultBackends);
+}
+
 // Fill
 TEST_CASE("NeonFillEndToEndTest")
 {
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index c09304e..9f80059 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -28,6 +28,7 @@
 #include <backendsCommon/test/GatherNdEndToEndTestImpl.hpp>
 #include <backendsCommon/test/InstanceNormalizationEndToEndTestImpl.hpp>
 #include <backendsCommon/test/LogSoftmaxEndToEndTestImpl.hpp>
+#include "backendsCommon/test/Pooling2dEndToEndTestImpl.hpp"
 #include <backendsCommon/test/PreluEndToEndTestImpl.hpp>
 #include <backendsCommon/test/QLstmEndToEndTestImpl.hpp>
 #include <backendsCommon/test/QuantizationEndToEndTestImpl.hpp>
@@ -1098,6 +1099,49 @@
     PreluEndToEndPositiveTest<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
+// Pooling 2D
+// Average Pool 2D
+TEST_CASE("RefAvgPool2DEndtoEndTestFloat32")
+{
+    AvgPool2dEndToEnd<DataType::Float32>(defaultBackends);
+}
+
+TEST_CASE("RefAvgPool2DEndtoEndTestFloat16")
+{
+    AvgPool2dEndToEndFloat16<DataType::Float16>(defaultBackends);
+}
+
+TEST_CASE("RefAvgPool2DIgnoreValueEndtoEndTestFloat32")
+{
+    AvgPool2dEndToEnd<DataType::Float32>(defaultBackends, PaddingMethod::IgnoreValue);
+}
+
+// Max Pool 2D
+TEST_CASE("RefMaxPool2DEndtoEndTestFloat32")
+{
+    MaxPool2dEndToEnd<DataType::Float32>(defaultBackends);
+}
+
+TEST_CASE("RefMaxPool2DEndtoEndTestFloat16")
+{
+    MaxPool2dEndToEndFloat16<DataType::Float16>(defaultBackends);
+}
+
+TEST_CASE("RefMaxPool2DIgnoreValueEndtoEndTestFloat32")
+{
+    MaxPool2dEndToEnd<DataType::Float32>(defaultBackends, PaddingMethod::IgnoreValue);
+}
+
+TEST_CASE("RefMaxPool2DTwoLayerEndtoEndTestFloat32")
+{
+    MaxPool2dTwoLayerEndToEnd<DataType::Float32>(defaultBackends);
+}
+
+TEST_CASE("RefMaxPool2DThreeLayerEndtoEndTestFloat32")
+{
+    MaxPool2dThreeLayerEndToEnd<DataType::Float32>(defaultBackends);
+}
+
 // Quantization
 TEST_CASE("QuantizationEndToEndFloat32_U8Test")
 {
@@ -1156,7 +1200,7 @@
     Splitter1dEndToEnd<DataType::QSymmS16>(defaultBackends);
 }
 
-TEST_CASE("TosaRefSplit1dEndtoEndTestFloat16")
+TEST_CASE("RefSplit1dEndtoEndTestFloat16")
 {
     Splitter1dEndToEndFloat16<DataType::Float16>(defaultBackends);
 }
diff --git a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
index 7a3edaf..68531f8 100644
--- a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
+++ b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
@@ -95,6 +95,14 @@
     Convolution2dEndToEnd<armnn::DataType::Float32>(tosaDefaultBackends, armnn::DataLayout::NHWC, false);
 }
 
+// Maximum
+TEST_CASE("TosaRefMaximumEndtoEndTestInt8")
+{
+    ElementwiseBinarySimpleNoReshapeEndToEnd<DataType::Signed32>(tosaDefaultBackends,
+                                                                 armnn::BinaryOperation::Maximum);
+}
+
+// Pooling
 // Average Pool 2D
 TEST_CASE("TosaRefAvgPool2DEndtoEndTestFloat32")
 {
@@ -111,13 +119,6 @@
     AvgPool2dEndToEnd<DataType::Float32>(tosaDefaultBackends, PaddingMethod::IgnoreValue);
 }
 
-// Maximum
-TEST_CASE("TosaRefMaximumEndtoEndTestInt8")
-{
-    ElementwiseBinarySimpleNoReshapeEndToEnd<DataType::Signed32>(tosaDefaultBackends,
-                                                                 armnn::BinaryOperation::Maximum);
-}
-
 // Max Pool 2D
 TEST_CASE("TosaRefMaxPool2DEndtoEndTestFloat32")
 {