IVGCVSW-7571 GpuFsa Op: Add Depthwise Conv2d

  * Added DepthwiseConv2d support for the GpuFsa backend.
  * Updated the DepthwiseConv2d end-to-end test.
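
  A minimal usage sketch (illustrative only, not part of this change; inputInfo,
  weightsInfo, biasesInfo and the data vectors are assumed to be set up as in
  the end-to-end test):

    // Hypothetical sketch: a single DepthwiseConvolution2d layer with constant
    // weights/bias, matching the descriptor used by the updated test, lowered
    // to the GpuFsa backend.
    using namespace armnn;

    DepthwiseConvolution2dDescriptor desc;
    desc.m_StrideX     = 1;
    desc.m_StrideY     = 1;
    desc.m_PadTop      = 1;
    desc.m_PadBottom   = 1;
    desc.m_BiasEnabled = true;
    desc.m_DataLayout  = DataLayout::NHWC;

    INetworkPtr net = INetwork::Create();
    IConnectableLayer* input   = net->AddInputLayer(0);
    IConnectableLayer* weights = net->AddConstantLayer(ConstTensor(weightsInfo, weightsData));
    IConnectableLayer* bias    = net->AddConstantLayer(ConstTensor(biasesInfo, biasesData));
    IConnectableLayer* conv    = net->AddDepthwiseConvolution2dLayer(desc, "depthwiseConv2d");
    IConnectableLayer* output  = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(conv->GetInputSlot(0));
    weights->GetOutputSlot(0).Connect(conv->GetInputSlot(1));
    bias->GetOutputSlot(0).Connect(conv->GetInputSlot(2));
    conv->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
    weights->GetOutputSlot(0).SetTensorInfo(weightsInfo);
    bias->GetOutputSlot(0).SetTensorInfo(biasesInfo);
    conv->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Optimizing with the "GpuFsa" backend preference exercises the new op.
    IRuntimePtr runtime = IRuntime::Create(IRuntime::CreationOptions());
    IOptimizedNetworkPtr optNet = Optimize(*net, {"GpuFsa"}, runtime->GetDeviceSpec());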

Signed-off-by: Tianle Cheng <tianle.cheng@arm.com>
Change-Id: I646839980d138ae235a00990c97c6e66a4418a5e
diff --git a/src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp b/src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp
index 1f9b60a..a2c369b 100644
--- a/src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp
+++ b/src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -67,34 +67,34 @@
     unsigned int kernelWidth  = 3;
 
     unsigned int outputHeight    = inputHeight - kernelHeight + 1 + 2;
-    unsigned int outputWidth     = (inputWidth - kernelWidth + 1)/2;
+    unsigned int outputWidth     = inputWidth - kernelWidth + 1;
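+    // With StrideX = 1 and no horizontal padding, outputWidth = inputWidth - kernelWidth + 1;
+    // the "+ 2" in outputHeight comes from PadTop = PadBottom = 1 set in the descriptor below.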
     unsigned int outputChannels  = inputChannels * depthMultiplier;
     unsigned int outputBatchSize = inputBatchSize;
 
-    TensorInfo inputInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType, qScale, qOffset, true);
-    TensorInfo outputInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType, qScale, qOffset);
-    TensorInfo weightsInfo({1, kernelHeight, kernelWidth, outputChannels}, ArmnnType, qScale, qOffset, true);
-    TensorInfo biasesInfo({outputChannels}, ArmnnBType, qScale * qScale, 0, true);
+    TensorInfo inputInfo({ inputBatchSize, inputHeight, inputWidth, inputChannels }, ArmnnType, qScale, qOffset, true);
+    TensorInfo outputInfo({ outputBatchSize, outputHeight, outputWidth, outputChannels }, ArmnnType, qScale, qOffset);
+    TensorInfo weightsInfo({ 1, kernelHeight, kernelWidth, outputChannels }, ArmnnType, qScale, qOffset, true);
+    TensorInfo biasesInfo({ outputChannels }, ArmnnBType, qScale * qScale, 0, true);
 
     std::vector<float> inputData =
     {
-        0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+        0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 1.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f,
+        0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
         0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
-        0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
-        0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
-        0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
-        0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
-        0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
-        0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
-        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
-        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
-        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
-        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
-        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
-        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
-        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
-        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
-   };
+        0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 1.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f,
+        0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f,
+        0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 1.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f,
+        0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f,
+        0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 1.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f,
+        0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f,
+        0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 1.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f,
+        0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f,
+        0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 1.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f,
+        0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f,
+        0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 1.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f,
+        0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f
+    };
 
     std::vector<float> weightsData =
     {
@@ -127,31 +127,43 @@
 
     std::vector<float> expectedOutputData =
     {
-        3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f,
-        5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.5f, 5.5f, 5.5f, 5.5f, 5.5f, 5.5f, 5.5f,
-        5.5f, 5.5f, 5.5f, 5.5f, 5.5f, 5.5f, 5.5f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f,
-        2.5f, 2.5f, 2.5f, 2.5f, 2.5f, 2.5f, 2.5f, 3.5f, 3.5f, 3.5f, 3.5f, 3.5f, 3.5f, 3.5f,
-        4.5f, 4.5f, 4.5f, 4.5f, 4.5f, 4.5f, 4.5f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f,
-        6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f,
-        1.0f, 3.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
-        2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
-        2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
-        2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 3.0f, 5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
-        3.0f, 5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 3.0f, 5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
-        3.0f, 5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 3.0f, 5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
-   };
+        3.0f,  4.5f,  2.0f,  1.0f,  3.0f,  4.5f,  3.0f,  1.0f,  3.0f,  4.5f,  4.0f,  3.0f,  3.0f,  4.5f,
+        1.0f, -1.0f,  3.0f,  4.5f,  1.0f, -1.0f,  3.0f,  4.5f,  1.0f, -1.0f,  3.0f,  4.5f,  1.0f, -1.0f,
+        3.0f,  4.5f,  1.0f, -1.0f,  3.0f,  4.5f,  1.0f, -1.0f,  3.0f,  4.5f,  1.0f, -1.0f,  3.0f,  4.5f,
+        1.0f, -1.0f,  3.0f,  4.5f,  1.0f, -1.0f,  3.0f,  4.5f,  1.0f, -1.0f,  3.0f,  4.5f,  1.0f, -1.0f,
+        3.0f,  5.5f,  3.0f,  2.0f,  3.0f,  5.5f,  4.0f,  2.0f,  3.0f,  5.5f,  5.0f,  4.0f,  3.0f,  5.5f,
+        1.0f, -1.0f,  3.0f,  5.5f,  1.0f, -1.0f,  3.0f,  5.5f,  1.0f, -1.0f,  3.0f,  5.5f,  1.0f, -1.0f,
+        3.0f,  5.5f,  1.0f, -1.0f,  3.0f,  5.5f,  1.0f, -1.0f,  3.0f,  5.5f,  1.0f, -1.0f,  3.0f,  5.5f,
+        1.0f, -1.0f,  3.0f,  5.5f,  1.0f, -1.0f,  3.0f,  5.5f,  1.0f, -1.0f,  3.0f,  5.5f,  1.0f, -1.0f,
+        5.0f,  6.5f,  3.0f,  2.0f,  5.0f,  6.5f,  4.0f,  2.0f,  5.0f,  6.5f,  5.0f,  4.0f,  5.0f,  6.5f,
+        1.0f, -1.0f,  5.0f,  6.5f,  1.0f, -1.0f,  5.0f,  6.5f,  1.0f, -1.0f,  5.0f,  6.5f,  1.0f, -1.0f,
+        5.0f,  6.5f,  1.0f, -1.0f,  5.0f,  6.5f,  1.0f, -1.0f,  5.0f,  6.5f,  1.0f, -1.0f,  5.0f,  6.5f,
+        1.0f, -1.0f,  5.0f,  6.5f,  1.0f, -1.0f,  5.0f,  6.5f,  1.0f, -1.0f,  5.0f,  6.5f,  1.0f, -1.0f,
+        5.5f,  8.0f,  3.0f,  2.0f,  5.5f,  8.0f,  4.0f,  2.0f,  5.5f,  8.0f,  5.0f,  4.0f,  5.5f,  8.0f,
+        1.0f, -1.0f,  5.5f,  8.0f,  1.0f, -1.0f,  5.5f,  8.0f,  1.0f, -1.0f,  5.5f,  8.0f,  1.0f, -1.0f,
+        5.5f,  8.0f,  1.0f, -1.0f,  5.5f,  8.0f,  1.0f, -1.0f,  5.5f,  8.0f,  1.0f, -1.0f,  5.5f,  8.0f,
+        1.0f, -1.0f,  5.5f,  8.0f,  1.0f, -1.0f,  5.5f,  8.0f,  1.0f, -1.0f,  5.5f,  8.0f,  1.0f, -1.0f,
+        5.5f,  8.0f,  3.0f,  2.0f,  5.5f,  8.0f,  4.0f,  2.0f,  5.5f,  8.0f,  5.0f,  4.0f,  5.5f,  8.0f,
+        1.0f, -1.0f,  5.5f,  8.0f,  1.0f, -1.0f,  5.5f,  8.0f,  1.0f, -1.0f,  5.5f,  8.0f,  1.0f, -1.0f,
+        5.5f,  8.0f,  1.0f, -1.0f,  5.5f,  8.0f,  1.0f, -1.0f,  5.5f,  8.0f,  1.0f, -1.0f,  5.5f,  8.0f,
+        1.0f, -1.0f,  5.5f,  8.0f,  1.0f, -1.0f,  5.5f,  8.0f,  1.0f, -1.0f,  5.5f,  8.0f,  1.0f, -1.0f,
+        5.0f,  8.0f,  3.0f,  2.0f,  5.0f,  8.0f,  4.0f,  2.0f,  5.0f,  8.0f,  5.0f,  4.0f,  5.0f,  8.0f,
+        1.0f, -1.0f,  5.0f,  8.0f,  1.0f, -1.0f,  5.0f,  8.0f,  1.0f, -1.0f,  5.0f,  8.0f,  1.0f, -1.0f,
+        5.0f,  8.0f,  1.0f, -1.0f,  5.0f,  8.0f,  1.0f, -1.0f,  5.0f,  8.0f,  1.0f, -1.0f,  5.0f,  8.0f,
+        1.0f, -1.0f,  5.0f,  8.0f,  1.0f, -1.0f,  5.0f,  8.0f,  1.0f, -1.0f,  5.0f,  8.0f,  1.0f, -1.0f
+    };
 
     DepthwiseConvolution2dDescriptor descriptor;
     descriptor.m_PadLeft     = 0;
     descriptor.m_PadRight    = 0;
     descriptor.m_PadTop      = 1;
-    descriptor.m_PadBottom   = 0;
-    descriptor.m_StrideX     = 2;
+    descriptor.m_PadBottom   = 1;
+    descriptor.m_StrideX     = 1;
     descriptor.m_StrideY     = 1;
     descriptor.m_BiasEnabled = true;
     descriptor.m_DataLayout  = dataLayout;
 
-    // Permute input and output if NCDHW.
+    // Permute the input if NCHW; the original input and output data are in NHWC format.
     if (dataLayout == DataLayout::NCHW)
     {
         PermuteTensorNhwcToNchw(inputInfo, inputData);
diff --git a/src/backends/gpuFsa/GpuFsaBackend.cpp b/src/backends/gpuFsa/GpuFsaBackend.cpp
index 9886a6e..e803699 100644
--- a/src/backends/gpuFsa/GpuFsaBackend.cpp
+++ b/src/backends/gpuFsa/GpuFsaBackend.cpp
@@ -21,6 +21,7 @@
 #include <arm_compute/runtime/CL/CLBufferAllocator.h>
 
 #include "layers/GpuFsaConvolution2d.hpp"
+#include "layers/GpuFsaDepthwiseConvolution2d.hpp"
 
 namespace armnn
 {
@@ -268,6 +269,31 @@
                 }
                 break;
             }
+            case (LayerType::DepthwiseConvolution2d):
+            {
+                auto input = base.GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
+                auto weights = base.GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
+
+                auto desc = PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&base.GetParameters());
+                if (desc->m_BiasEnabled)
+                {
+                    auto bias = base.GetInputSlot(2).GetConnectedOutputSlot()->GetTensorInfo();
+                    GpuFsaDepthwiseConvolution2dCreateOp(preCompiledBlobPtr,
+                                                         input,
+                                                         *desc,
+                                                         weights,
+                                                         bias);
+                }
+                else
+                {
+                    GpuFsaDepthwiseConvolution2dCreateOp(preCompiledBlobPtr,
+                                                         input,
+                                                         *desc,
+                                                         weights,
+                                                         EmptyOptional());
+                }
+                break;
+            }
             default:
                 // unsupported layer for GpuFsa backend
                 continue;
diff --git a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
index 96c986a..18c9ac8 100644
--- a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
+++ b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
@@ -11,6 +11,7 @@
 
 #if defined(ARMCOMPUTEGPUFSA_ENABLED)
 #include "layers/GpuFsaConvolution2d.hpp"
+#include "layers/GpuFsaDepthwiseConvolution2d.hpp"
 #endif
 
 #include <vector>
@@ -98,6 +99,34 @@
                                             infos[3]);
             }
         }
+        case LayerType::DepthwiseConvolution2d:
+        {
+            if (infos.size() != 4)
+            {
+                throw InvalidArgumentException("Invalid number of DepthwiseConvolution2dDescriptor TensorInfos. "
+                                               "TensorInfos should be of format: {input, output, weights, biases}.");
+            }
+
+            auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
+            if (infos[3] == TensorInfo())
+            {
+                FORWARD_LAYER_VALIDATE_FUNC(GpuFsaDepthwiseConvolution2dValidate,
+                                            reasonIfUnsupported,
+                                            infos[0],
+                                            desc,
+                                            infos[2],
+                                            EmptyOptional());
+            }
+            else
+            {
+                FORWARD_LAYER_VALIDATE_FUNC(GpuFsaDepthwiseConvolution2dValidate,
+                                            reasonIfUnsupported,
+                                            infos[0],
+                                            desc,
+                                            infos[2],
+                                            infos[3]);
+            }
+        }
         case LayerType::Constant:
         case LayerType::Input:
         case LayerType::Output:
diff --git a/src/backends/gpuFsa/layers/CMakeLists.txt b/src/backends/gpuFsa/layers/CMakeLists.txt
index 3a02ce1..c174c51 100644
--- a/src/backends/gpuFsa/layers/CMakeLists.txt
+++ b/src/backends/gpuFsa/layers/CMakeLists.txt
@@ -6,6 +6,8 @@
 list(APPEND armnnGpuFsaBackendLayerValidators_sources
         GpuFsaConvolution2d.cpp
         GpuFsaConvolution2d.hpp
+        GpuFsaDepthwiseConvolution2d.cpp
+        GpuFsaDepthwiseConvolution2d.hpp
     )
 
 add_library(armnnGpuFsaBackendLayerValidators OBJECT ${armnnGpuFsaBackendLayerValidators_sources})
diff --git a/src/backends/gpuFsa/layers/GpuFsaConvolution2d.cpp b/src/backends/gpuFsa/layers/GpuFsaConvolution2d.cpp
index 7aa643d..2393174 100644
--- a/src/backends/gpuFsa/layers/GpuFsaConvolution2d.cpp
+++ b/src/backends/gpuFsa/layers/GpuFsaConvolution2d.cpp
@@ -21,7 +21,6 @@
 #include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
 
 #include <vector>
-#include <iostream>
 
 namespace armnn
 {
diff --git a/src/backends/gpuFsa/layers/GpuFsaDepthwiseConvolution2d.cpp b/src/backends/gpuFsa/layers/GpuFsaDepthwiseConvolution2d.cpp
new file mode 100644
index 0000000..01a36f2
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaDepthwiseConvolution2d.cpp
@@ -0,0 +1,210 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GpuFsaDepthwiseConvolution2d.hpp"
+
+#include <armnn/Types.hpp>
+
+#include <backendsCommon/WorkloadUtils.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <arm_compute/core/ITensorInfo.h>
+#include <arm_compute/core/TensorInfo.h>
+#include <arm_compute/core/TensorShape.h>
+#include <arm_compute/core/CL/CLKernelLibrary.h>
+#include <arm_compute/core/CL/CLCompileContext.h>
+
+#include <arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
+#include <src/dynamic_fusion/sketch/gpu/GpuWorkloadContextImpl.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
+
+#include <vector>
+
+namespace armnn
+{
+
+using namespace armcomputetensorutils;
+
+arm_compute::Status GpuFsaDepthwiseConvolution2dValidate(const TensorInfo& input,
+                                                         const DepthwiseConvolution2dDescriptor& descriptor,
+                                                         const TensorInfo& weights,
+                                                         const Optional<TensorInfo>& biases)
+{
+    // Create a new workload sketch for validation purposes
+    auto compileCtx = arm_compute::CLKernelLibrary::get().get_compile_context();
+    auto workloadContext = GpuWorkloadContext(&compileCtx);
+    GpuWorkloadSketch sketch{ &workloadContext };
+
+    // Build and create tensor infos using the sketch
+    const arm_compute::TensorInfo aclInputInfo   = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+
+    // ArmNN format for weights for depthwise is [1, H, W, C] independently of the input/output layout
+    //
+    // ACL format for weights for depthwise is:
+    // - [1, H, W, C] for [N, H, W, C] input/output layout (matches with ArmNN)
+    // - [1, C, H, W] for [N, C, H, W] input/output layout
+    //
+    // Therefore ArmNN weights have to be permuted when input/output layout is [N, C, H, W] to pass them to ACL.
+    // The PermuteDepthwiseConv2dWeights backend optimization takes care of this, but it has not been performed yet,
+    // so we do the permute here for the TensorInfo weights.
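+    //
+    // Illustrative example: NHWC weights of shape [1, kH, kW, C * depthMultiplier] are left as-is,
+    // while for NCHW they are permuted to [1, C * depthMultiplier, kH, kW]; dropping the leading 1
+    // below gives the 3D shape handed to ACL.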
+    unsigned int aclDepthMultiplier;
+    TensorInfo weightsPermuted;
+    std::tie(weightsPermuted, aclDepthMultiplier) = Convert1HWOTensorInfoToAcl(weights, input, descriptor.m_DataLayout);
+    auto weightsShape = weightsPermuted.GetShape();
+    weightsPermuted.SetShape({weightsShape[1], weightsShape[2], weightsShape[3]});
+
+    arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weightsPermuted, descriptor.m_DataLayout);
+    aclWeightsInfo.set_are_values_constant(weights.IsConstant());
+
+    auto inputInfo  = workloadContext.create_tensor_info(aclInputInfo);
+    auto weightInfo = workloadContext.create_tensor_info(aclWeightsInfo);
+
+    // Only create the bias tensor info if enabled, otherwise pass nullptr to validate_op
+    arm_compute::TensorInfo aclBiasInfo;
+    arm_compute::ITensorInfo* biasSketchInfoPtr = nullptr;
+
+    if (descriptor.m_BiasEnabled)
+    {
+        if (!biases.has_value())
+        {
+            throw InvalidArgumentException(
+                "GpuFsaDepthwiseConvolution2dValidate: No biases set when biases are enabled");
+        }
+        aclBiasInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
+        aclBiasInfo.set_are_values_constant(biases.value().IsConstant());
+
+        biasSketchInfoPtr = workloadContext.create_tensor_info(aclBiasInfo);
+    }
+
+    // Set DepthwiseConv2d attributes using descriptor
+    const arm_compute::Size2D    aclDilationInfo = BuildArmComputeSize2D(descriptor.m_DilationX,
+                                                                         descriptor.m_DilationY);
+    const arm_compute::Padding2D aclPadInfo      = BuildArmComputePaddingInfo(descriptor);
+    const arm_compute::Size2D    aclStrideInfo   = BuildArmComputeSize2D(descriptor.m_StrideX, descriptor.m_StrideY);
+
+    DepthwiseConv2dAttributes depthwiseConv2dAttributes{};
+    depthwiseConv2dAttributes.pad(aclPadInfo);
+    depthwiseConv2dAttributes.stride(aclStrideInfo);
+    depthwiseConv2dAttributes.dilation(aclDilationInfo);
+    depthwiseConv2dAttributes.depth_multiplier(aclDepthMultiplier);
+
+    // Validate operator, check status and update reasonIfUnsupported
+    arm_compute::Status aclStatus = GpuDepthwiseConv2d::validate_op(sketch,
+                                                                    inputInfo,
+                                                                    weightInfo,
+                                                                    biasSketchInfoPtr,
+                                                                    depthwiseConv2dAttributes);
+
+    return aclStatus;
+}
+
+void GpuFsaDepthwiseConvolution2dCreateOp(GpuFsaPreCompiledBlob* blob,
+                                          const TensorInfo& input,
+                                          const DepthwiseConvolution2dDescriptor& descriptor,
+                                          const TensorInfo& weights,
+                                          const Optional<TensorInfo>& biases)
+{
+/*
+* Creating an Op for the GpuFsa backend requires us to create and maintain quite a bit of data, which is then stored
+* in a GpuFsaPreCompiledBlob for execution later. Specifically we need:
+* GpuWorkloadContext: this contains the TensorInfos and is unique to the graph being executed.
+* Sketch: this is similar to a subgraph and can contain one or more operations. Multiple ops can be "fused" together
+* within a single sketch.
+* inputTensorInfos / outputTensorInfos: these are pointers to the TensorInfos used when creating the sketch. They
+* refer to the TensorInfos stored within the GpuWorkloadContext and are needed when executing the sketch, as the
+* TensorInfos used when creating the tensors must match those used to create the sketch; otherwise the runtime
+* doesn't know which tensors to use.
+*/
+    using namespace arm_compute::experimental::dynamic_fusion;
+    GpuWorkloadSketch* sketch = blob->sketch.get();
+    GpuWorkloadContext* workloadContext = blob->workloadContext.get();
+    std::vector<arm_compute::ITensorInfo*> inputTensorInfos = {};
+    std::vector<arm_compute::ITensorInfo*> outputTensorInfos = {};
+
+    // Build and create tensor infos using the sketch
+    const arm_compute::TensorInfo aclInputInfo   = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+
+    // ArmNN format for weights for depthwise is [1, H, W, C] independently of the input/output layout
+    //
+    // ACL format for weights for depthwise is:
+    // - [1, H, W, C] for [N, H, W, C] input/output layout (matches with ArmNN)
+    // - [1, C, H, W] for [N, C, H, W] input/output layout
+    //
+    // Therefore ArmNN weights have to be permuted when input/output layout is [N, C, H, W] to pass them to ACL.
+    // The PermuteDepthwiseConv2dWeights backend optimization takes care of this, but it has not been performed yet,
+    // so we do the permute here for the TensorInfo weights.
+    unsigned int aclDepthMultiplier;
+    TensorInfo weightsPermuted;
+    std::tie(weightsPermuted, aclDepthMultiplier) = Convert1HWOTensorInfoToAcl(weights, input, descriptor.m_DataLayout);
+    auto weightsShape = weightsPermuted.GetShape();
+    weightsPermuted.SetShape({weightsShape[1], weightsShape[2], weightsShape[3]});
+
+    arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weightsPermuted, descriptor.m_DataLayout);
+    aclWeightsInfo.set_are_values_constant(weights.IsConstant());
+
+    inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInputInfo));
+    inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclWeightsInfo));
+
+    // Only create the bias tensor info if enabled, otherwise pass nullptr to validate_op / create_op
+    arm_compute::TensorInfo aclBiasInfo;
+    arm_compute::ITensorInfo* biasSketchInfoPtr = nullptr;
+
+    if (descriptor.m_BiasEnabled)
+    {
+        if (!biases.has_value())
+        {
+            throw InvalidArgumentException(
+                "GpuFsaDepthwiseConvolution2dCreateOp: No biases set when biases are enabled");
+        }
+        aclBiasInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
+        aclBiasInfo.set_are_values_constant(biases.value().IsConstant());
+
+        inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclBiasInfo));
+        biasSketchInfoPtr = inputTensorInfos[2];
+    }
+
+    // Set DepthwiseConv2d attributes using descriptor
+    const arm_compute::Size2D    aclDilationInfo = BuildArmComputeSize2D(descriptor.m_DilationX,
+                                                                         descriptor.m_DilationY);
+    const arm_compute::Padding2D aclPadInfo      = BuildArmComputePaddingInfo(descriptor);
+    const arm_compute::Size2D    aclStrideInfo   = BuildArmComputeSize2D(descriptor.m_StrideX, descriptor.m_StrideY);
+
+    DepthwiseConv2dAttributes depthwiseConv2dAttributes{};
+    depthwiseConv2dAttributes.pad(aclPadInfo);
+    depthwiseConv2dAttributes.stride(aclStrideInfo);
+    depthwiseConv2dAttributes.dilation(aclDilationInfo);
+    depthwiseConv2dAttributes.depth_multiplier(aclDepthMultiplier);
+
+    // Validate the operator and check that the configuration is supported
+    arm_compute::Status aclStatus = GpuDepthwiseConv2d::validate_op(*sketch,
+                                                                    inputTensorInfos[0],
+                                                                    inputTensorInfos[1],
+                                                                    biasSketchInfoPtr,
+                                                                    depthwiseConv2dAttributes);
+
+    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
+    if (!supported)
+    {
+        throw BackendCapabilityException(
+            "\"GpuFsa\" backend failed during DepthwiseConvolution2D operation validation");
+    }
+
+    // Create the Op within the Sketch using the TensorInfos we have stored
+    arm_compute::ITensorInfo* convOutInfo = GpuDepthwiseConv2d::create_op(*sketch,
+                                                                          inputTensorInfos[0],
+                                                                          inputTensorInfos[1],
+                                                                          biasSketchInfoPtr,
+                                                                          depthwiseConv2dAttributes);
+
+    outputTensorInfos.emplace_back(workloadContext->create_tensor_info());
+    GpuOutput::create_op(*sketch, convOutInfo, outputTensorInfos[0]);
+
+    // Store the TensorInfos within the blob as unique_ptrs to be used later
+    blob->inputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(inputTensorInfos);
+    blob->outputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(outputTensorInfos);
+}
+
+} // namespace armnn
diff --git a/src/backends/gpuFsa/layers/GpuFsaDepthwiseConvolution2d.hpp b/src/backends/gpuFsa/layers/GpuFsaDepthwiseConvolution2d.hpp
new file mode 100644
index 0000000..b705096
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaDepthwiseConvolution2d.hpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/Descriptors.hpp>
+#include <armnn/Tensor.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
+#include <gpuFsa/GpuFsaBackend.hpp>
+
+namespace armnn
+{
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+arm_compute::Status GpuFsaDepthwiseConvolution2dValidate(const TensorInfo& input,
+                                                         const DepthwiseConvolution2dDescriptor& descriptor,
+                                                         const TensorInfo& weights,
+                                                         const Optional<TensorInfo>& biases);
+
+void GpuFsaDepthwiseConvolution2dCreateOp(GpuFsaPreCompiledBlob* blob,
+                                          const TensorInfo& input,
+                                          const DepthwiseConvolution2dDescriptor& descriptor,
+                                          const TensorInfo& weights,
+                                          const Optional<TensorInfo>& biases);
+
+} // namespace armnn
diff --git a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
index c2cdd57..79dd9d3 100644
--- a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
@@ -6,6 +6,8 @@
 #include "backendsCommon/test/EndToEndTestImpl.hpp"
 
 #include "backendsCommon/test/Convolution2dEndToEndTestImpl.hpp"
+#include "backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp"
+
 #include <doctest/doctest.h>
 
 TEST_SUITE("GpuFsaEndToEnd")
@@ -24,4 +26,10 @@
     Convolution2dEndToEnd<armnn::DataType::Float32>(gpuFsaDefaultBackends, armnn::DataLayout::NHWC, false);
 }
 
+TEST_CASE("GpuFsaDepthwiseConvolution2dEndtoEndTestFloat32")
+{
+    DepthwiseConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(gpuFsaDefaultBackends,
+                                                                                       armnn::DataLayout::NHWC);
+}
+
 }
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 63f76b1..199fbdf 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -17,6 +17,7 @@
 #include <backendsCommon/test/Convolution2dEndToEndTestImpl.hpp>
 #include <backendsCommon/test/Convolution3dEndToEndTestImpl.hpp>
 #include <backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp>
+#include <backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp>
 #include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp>
 #include <backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp>
 #include <backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp>
@@ -652,6 +653,12 @@
                                                                                 armnn::DataLayout::NDHWC);
 }
 
+TEST_CASE("RefDepthwiseConvolution2dEndtoEndFloat32Test")
+{
+    DepthwiseConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(defaultBackends,
+                                                                                       armnn::DataLayout::NHWC);
+}
+
 TEST_CASE("RefEluEndToEndTestFloat32")
 {
     EluEndToEndTest<armnn::DataType::Float32>(defaultBackends);