IVGCVSW-7570 GpuFsa Op: Add ElementwiseBinary Operators available

* Refactor to generalize
* Add MUL

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I2ee273d50d3a8b114b5a41abc8ee7585b15e3308
diff --git a/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp
index 5138e49..dbc270e 100644
--- a/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp
@@ -135,7 +135,8 @@
 
     const std::vector<float> input2({ 2, 2, 2, 2 });
     std::vector<float> expectedOutput;
-    switch (operation) {
+    switch (operation)
+    {
         case armnn::BinaryOperation::Add:
             expectedOutput = { 3, 1, 3, 3 };
             break;
diff --git a/src/backends/gpuFsa/GpuFsaBackend.cpp b/src/backends/gpuFsa/GpuFsaBackend.cpp
index 1bfe8dd..de0d019 100644
--- a/src/backends/gpuFsa/GpuFsaBackend.cpp
+++ b/src/backends/gpuFsa/GpuFsaBackend.cpp
@@ -23,8 +23,7 @@
 #include "layers/GpuFsaCast.hpp"
 #include "layers/GpuFsaConvolution2d.hpp"
 #include "layers/GpuFsaDepthwiseConvolution2d.hpp"
-#include "layers/GpuFsaElementwiseBinaryAdd.hpp"
-#include "layers/GpuFsaElementwiseBinarySub.hpp"
+#include "layers/GpuFsaElementwiseBinary.hpp"
 #include "layers/GpuFsaPooling2d.hpp"
 #include "layers/GpuFsaResize.hpp"
 
@@ -309,20 +308,9 @@
             case LayerType::ElementwiseBinary:
             {
                 auto desc = PolymorphicDowncast<const ElementwiseBinaryDescriptor *>(&base.GetParameters());
-                if (desc->m_Operation == BinaryOperation::Add)
-                {
-                    auto input0 = base.GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
-                    auto input1 = base.GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
-
-                    GpuFsaElementwiseBinaryAddCreateOp(preCompiledBlobPtr, input0, input1);
-                }
-                else if (desc->m_Operation == BinaryOperation::Sub)
-                {
-                    auto input0 = base.GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
-                    auto input1 = base.GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
-
-                    GpuFsaElementwiseBinarySubCreateOp(preCompiledBlobPtr, input0, input1);
-                }
+                auto input0 = base.GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
+                auto input1 = base.GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
+                GpuFsaElementwiseBinaryCreateOp(preCompiledBlobPtr, input0, input1, *desc);
                 break;
             }
             case (LayerType::Pooling2d):
diff --git a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
index 56af9c4..1ee80c9 100644
--- a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
+++ b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
@@ -13,8 +13,7 @@
 #include "layers/GpuFsaCast.hpp"
 #include "layers/GpuFsaConvolution2d.hpp"
 #include "layers/GpuFsaDepthwiseConvolution2d.hpp"
-#include "layers/GpuFsaElementwiseBinaryAdd.hpp"
-#include "layers/GpuFsaElementwiseBinarySub.hpp"
+#include "layers/GpuFsaElementwiseBinary.hpp"
 #include "layers/GpuFsaPooling2d.hpp"
 #include "layers/GpuFsaResize.hpp"
 #endif
@@ -150,28 +149,15 @@
             if (infos.size() != 3)
             {
                 throw InvalidArgumentException("Invalid number of ElementwiseBinary TensorInfos. "
-                                               "TensorInfos should be of format: {input0, input1, output0}.");
+                                               "TensorInfos should be of format: {input0, input1, output}.");
             }
 
             auto desc = PolymorphicDowncast<const ElementwiseBinaryDescriptor*>(&descriptor);
-            if (desc->m_Operation == BinaryOperation::Add)
-            {
-                FORWARD_LAYER_VALIDATE_FUNC(GpuFsaElementwiseBinaryAddValidate,
-                                            reasonIfUnsupported,
-                                            infos[0],
-                                            infos[1]);
-            }
-            else if (desc->m_Operation == BinaryOperation::Sub)
-            {
-                FORWARD_LAYER_VALIDATE_FUNC(GpuFsaElementwiseBinarySubValidate,
-                                            reasonIfUnsupported,
-                                            infos[0],
-                                            infos[1]);
-            }
-            else
-            {
-                throw InvalidArgumentException("Invalid ElementwiseBinary BinaryOperation operation.");
-            }
+            FORWARD_LAYER_VALIDATE_FUNC(GpuFsaElementwiseBinaryValidate,
+                                        reasonIfUnsupported,
+                                        infos[0],
+                                        infos[1],
+                                        *desc);
         }
         case LayerType::Pooling2d:
         {
diff --git a/src/backends/gpuFsa/layers/CMakeLists.txt b/src/backends/gpuFsa/layers/CMakeLists.txt
index 9ea36b6..3fe4bdc 100644
--- a/src/backends/gpuFsa/layers/CMakeLists.txt
+++ b/src/backends/gpuFsa/layers/CMakeLists.txt
@@ -10,10 +10,8 @@
         GpuFsaConvolution2d.hpp
         GpuFsaDepthwiseConvolution2d.cpp
         GpuFsaDepthwiseConvolution2d.hpp
-        GpuFsaElementwiseBinaryAdd.cpp
-        GpuFsaElementwiseBinaryAdd.hpp
-        GpuFsaElementwiseBinarySub.cpp
-        GpuFsaElementwiseBinarySub.hpp
+        GpuFsaElementwiseBinary.cpp
+        GpuFsaElementwiseBinary.hpp
         GpuFsaPooling2d.cpp
         GpuFsaPooling2d.hpp
         GpuFsaResize.cpp
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinary.cpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinary.cpp
new file mode 100644
index 0000000..7c3760a
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinary.cpp
@@ -0,0 +1,145 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GpuFsaElementwiseBinary.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h>
+#include <arm_compute/core/CL/CLKernelLibrary.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuMul.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuSub.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
+
+using namespace arm_compute::experimental::dynamic_fusion;
+using namespace armnn::armcomputetensorutils;
+
+namespace armnn
+{
+
+arm_compute::Status GpuFsaElementwiseBinaryValidate(const TensorInfo& input0,
+                                                    const TensorInfo& input1,
+                                                    const ElementwiseBinaryDescriptor& descriptor)
+{
+    // Create a new workload sketch, for validation purposes
+    auto compileCtx         = arm_compute::CLKernelLibrary::get().get_compile_context();
+    auto workloadContext    = GpuWorkloadContext(&compileCtx);
+    GpuWorkloadSketch sketch{ &workloadContext };
+
+    arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
+    arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
+
+    aclInput0Info.set_are_values_constant(input0.IsConstant());
+    aclInput1Info.set_are_values_constant(input1.IsConstant());
+
+    arm_compute::ITensorInfo*  inputInfo0 = workloadContext.create_tensor_info(aclInput0Info);
+    arm_compute::ITensorInfo*  inputInfo1 = workloadContext.create_tensor_info(aclInput1Info);
+
+    switch (descriptor.m_Operation)
+    {
+        case BinaryOperation::Add:
+        {
+            return GpuAdd::validate_op(sketch, inputInfo0, inputInfo1);
+        }
+        case BinaryOperation::Mul:
+        {
+            return GpuMul::validate_op(sketch, inputInfo0, inputInfo1);
+        }
+        case BinaryOperation::Sub:
+        {
+            return GpuSub::validate_op(sketch, inputInfo0, inputInfo1);
+        }
+        default:
+            return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR,
+                                       std::string("Elementwise Binary operation not supported in GpuFsa: ")
+                                       + GetBinaryOperationAsCString(descriptor.m_Operation));
+    }
+}
+
+void GpuFsaElementwiseBinaryCreateOp(GpuFsaPreCompiledBlob* blob,
+                                     const TensorInfo& input0,
+                                     const TensorInfo& input1,
+                                     const ElementwiseBinaryDescriptor& descriptor)
+{
+    GpuWorkloadSketch* sketch           = blob->sketch.get();
+    GpuWorkloadContext* workloadContext = blob->workloadContext.get();
+    std::vector<arm_compute::ITensorInfo*> inputTensorInfos  = {};
+    std::vector<arm_compute::ITensorInfo*> outputTensorInfos = {};
+
+    arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
+    arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
+
+    aclInput0Info.set_are_values_constant(input0.IsConstant());
+    aclInput1Info.set_are_values_constant(input1.IsConstant());
+
+    inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput0Info));
+    inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput1Info));
+
+    // Validate the operator for these inputs before creating it; an unsupported
+    // operation or input combination throws rather than returning a status.
+    arm_compute::Status aclStatus{};
+    switch (descriptor.m_Operation)
+    {
+        case BinaryOperation::Add:
+        {
+            aclStatus = GpuAdd::validate_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
+            break;
+        }
+        case BinaryOperation::Mul:
+        {
+            aclStatus = GpuMul::validate_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
+            break;
+        }
+        case BinaryOperation::Sub:
+        {
+            aclStatus = GpuSub::validate_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
+            break;
+        }
+        default:
+            throw InvalidArgumentException(std::string("Elementwise Binary operation not supported in GpuFsa: ")
+                                           + GetBinaryOperationAsCString(descriptor.m_Operation));
+    }
+
+    const bool supported = aclStatus.error_code() == arm_compute::ErrorCode::OK;
+    if (!supported)
+    {
+        throw BackendCapabilityException("\"GpuFsa\" backend failed during elementwise binary operation validation");
+    }
+
+    arm_compute::ITensorInfo* elementwiseBinaryOutputInfo{};
+    switch (descriptor.m_Operation)
+    {
+        case BinaryOperation::Add:
+        {
+            elementwiseBinaryOutputInfo = GpuAdd::create_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
+            break;
+        }
+        case BinaryOperation::Mul:
+        {
+            elementwiseBinaryOutputInfo = GpuMul::create_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
+            break;
+        }
+        case BinaryOperation::Sub:
+        {
+            elementwiseBinaryOutputInfo = GpuSub::create_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
+            break;
+        }
+        default:
+            throw InvalidArgumentException(std::string("Elementwise Binary operation not supported in GpuFsa: ")
+                                           + GetBinaryOperationAsCString(descriptor.m_Operation));
+    }
+
+    // Temporary fix until fusing attempt is made for GpuFsa backend and Output layer workload is created.
+    outputTensorInfos.emplace_back(workloadContext->create_tensor_info());
+    GpuOutput::create_op(*sketch, elementwiseBinaryOutputInfo, outputTensorInfos[0]);
+
+    // Store the TensorInfos within the blob as unique_ptrs to be used later
+    blob->inputTensorInfos  = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(inputTensorInfos);
+    blob->outputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(outputTensorInfos);
+}
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinary.hpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinary.hpp
new file mode 100644
index 0000000..11583f1
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinary.hpp
@@ -0,0 +1,22 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/Descriptors.hpp>
+
+#include <gpuFsa/GpuFsaBackend.hpp>
+
+namespace armnn
+{
+arm_compute::Status GpuFsaElementwiseBinaryValidate(const TensorInfo& input0,
+                                                    const TensorInfo& input1,
+                                                    const ElementwiseBinaryDescriptor& descriptor);
+
+void GpuFsaElementwiseBinaryCreateOp(GpuFsaPreCompiledBlob* blob,
+                                     const TensorInfo& input0,
+                                     const TensorInfo& input1,
+                                     const ElementwiseBinaryDescriptor& descriptor);
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.cpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.cpp
deleted file mode 100644
index d6404dd..0000000
--- a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.cpp
+++ /dev/null
@@ -1,79 +0,0 @@
-//
-// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "GpuFsaElementwiseBinaryAdd.hpp"
-
-#include <aclCommon/ArmComputeTensorUtils.hpp>
-
-#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
-#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
-#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h>
-#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
-
-using namespace arm_compute::experimental::dynamic_fusion;
-using namespace armnn::armcomputetensorutils;
-
-namespace armnn
-{
-
-arm_compute::Status GpuFsaElementwiseBinaryAddValidate(const TensorInfo& input0,
-                                                       const TensorInfo& input1)
-{
-    // Create a new workload sketch, for validation purposes
-    auto compileCtx         = arm_compute::CLKernelLibrary::get().get_compile_context();
-    auto workloadContext    = GpuWorkloadContext(&compileCtx);
-    GpuWorkloadSketch sketch{ &workloadContext };
-
-    arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
-    arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
-
-    aclInput0Info.set_are_values_constant(input0.IsConstant());
-    aclInput1Info.set_are_values_constant(input1.IsConstant());
-
-    arm_compute::ITensorInfo*  inputInfo0 = workloadContext.create_tensor_info(aclInput0Info);
-    arm_compute::ITensorInfo*  inputInfo1 = workloadContext.create_tensor_info(aclInput1Info);
-
-    return GpuAdd::validate_op(sketch, inputInfo0, inputInfo1);
-}
-
-void GpuFsaElementwiseBinaryAddCreateOp(GpuFsaPreCompiledBlob* blob,
-                                        const TensorInfo& input0,
-                                        const TensorInfo& input1)
-{
-    GpuWorkloadSketch* sketch           = blob->sketch.get();
-    GpuWorkloadContext* workloadContext = blob->workloadContext.get();
-    std::vector<arm_compute::ITensorInfo*> inputTensorInfos  = {};
-    std::vector<arm_compute::ITensorInfo*> outputTensorInfos = {};
-
-    arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
-    arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
-
-    aclInput0Info.set_are_values_constant(input0.IsConstant());
-    aclInput1Info.set_are_values_constant(input1.IsConstant());
-
-    inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput0Info));
-    inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput1Info));
-
-    // Validate operator, check status and update reasonIfUnsupported
-    arm_compute::Status aclStatus = GpuAdd::validate_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
-    const bool supported = aclStatus.error_code() == arm_compute::ErrorCode::OK;
-    if (!supported)
-    {
-        throw BackendCapabilityException("\"GpuFsa\" backend failed during elementwise binary add validation");
-    }
-
-    arm_compute::ITensorInfo* addOutputInfo =
-            GpuAdd::create_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
-
-    // Temporary fix until fusing attempt is make for GpuFsa backend and Output layer workload is created.
-    outputTensorInfos.emplace_back(workloadContext->create_tensor_info());
-    GpuOutput::create_op(*sketch, addOutputInfo, outputTensorInfos[0]);
-
-    // Store the TensorInfos within the blob as unique_ptrs to be used later
-    blob->inputTensorInfos  = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(inputTensorInfos);
-    blob->outputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(outputTensorInfos);
-}
-
-} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.hpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.hpp
deleted file mode 100644
index 1392d01..0000000
--- a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//
-// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-
-#include <armnn/Descriptors.hpp>
-
-#include <gpuFsa/GpuFsaBackend.hpp>
-
-namespace armnn
-{
-arm_compute::Status GpuFsaElementwiseBinaryAddValidate(const TensorInfo& input0,
-                                                       const TensorInfo& input1);
-
-void GpuFsaElementwiseBinaryAddCreateOp(GpuFsaPreCompiledBlob* blob,
-                                        const TensorInfo& input0,
-                                        const TensorInfo& input1);
-
-} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.cpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.cpp
deleted file mode 100644
index 5e0f478..0000000
--- a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.cpp
+++ /dev/null
@@ -1,79 +0,0 @@
-//
-// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "GpuFsaElementwiseBinarySub.hpp"
-
-#include <aclCommon/ArmComputeTensorUtils.hpp>
-
-#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
-#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
-#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuSub.h>
-#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
-
-using namespace arm_compute::experimental::dynamic_fusion;
-using namespace armnn::armcomputetensorutils;
-
-namespace armnn
-{
-
-arm_compute::Status GpuFsaElementwiseBinarySubValidate(const TensorInfo& input0,
-                                                       const TensorInfo& input1)
-{
-    // Create a new workload sketch, for validation purposes
-    auto compileCtx         = arm_compute::CLKernelLibrary::get().get_compile_context();
-    auto workloadContext    = GpuWorkloadContext(&compileCtx);
-    GpuWorkloadSketch sketch{ &workloadContext };
-
-    arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
-    arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
-
-    aclInput0Info.set_are_values_constant(input0.IsConstant());
-    aclInput1Info.set_are_values_constant(input1.IsConstant());
-
-    arm_compute::ITensorInfo*  inputInfo0 = workloadContext.create_tensor_info(aclInput0Info);
-    arm_compute::ITensorInfo*  inputInfo1 = workloadContext.create_tensor_info(aclInput1Info);
-
-    return GpuSub::validate_op(sketch, inputInfo0, inputInfo1);
-}
-
-void GpuFsaElementwiseBinarySubCreateOp(GpuFsaPreCompiledBlob* blob,
-                                        const TensorInfo& input0,
-                                        const TensorInfo& input1)
-{
-    GpuWorkloadSketch* sketch           = blob->sketch.get();
-    GpuWorkloadContext* workloadContext = blob->workloadContext.get();
-    std::vector<arm_compute::ITensorInfo*> inputTensorInfos  = {};
-    std::vector<arm_compute::ITensorInfo*> outputTensorInfos = {};
-
-    arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
-    arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
-
-    aclInput0Info.set_are_values_constant(input0.IsConstant());
-    aclInput1Info.set_are_values_constant(input1.IsConstant());
-
-    inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput0Info));
-    inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput1Info));
-
-    // Validate operator, check status and update reasonIfUnsupported
-    arm_compute::Status aclStatus = GpuSub::validate_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
-    const bool supported = aclStatus.error_code() == arm_compute::ErrorCode::OK;
-    if (!supported)
-    {
-        throw BackendCapabilityException("\"GpuFsa\" backend failed during elementwise binary subtract validation");
-    }
-
-    arm_compute::ITensorInfo* addOutputInfo =
-            GpuSub::create_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
-
-    // Temporary fix until fusing attempt is make for GpuFsa backend and Output layer workload is created.
-    outputTensorInfos.emplace_back(workloadContext->create_tensor_info());
-    GpuOutput::create_op(*sketch, addOutputInfo, outputTensorInfos[0]);
-
-    // Store the TensorInfos within the blob as unique_ptrs to be used later
-    blob->inputTensorInfos  = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(inputTensorInfos);
-    blob->outputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(outputTensorInfos);
-}
-
-}
\ No newline at end of file
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.hpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.hpp
deleted file mode 100644
index 4d58f31..0000000
--- a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//
-// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-
-#include <armnn/Descriptors.hpp>
-
-#include <gpuFsa/GpuFsaBackend.hpp>
-
-namespace armnn
-{
-arm_compute::Status GpuFsaElementwiseBinarySubValidate(const TensorInfo& input0,
-                                                       const TensorInfo& input1);
-
-void GpuFsaElementwiseBinarySubCreateOp(GpuFsaPreCompiledBlob* blob,
-                                        const TensorInfo& input0,
-                                        const TensorInfo& input1);
-
-} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
index d2412bf..a2708c0 100644
--- a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
@@ -66,6 +66,17 @@
     ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float16>(gpuFsaDefaultBackends, BinaryOperation::Add);
 }
 
+// ElementwiseBinary Mul
+TEST_CASE("GpuFsaElementwiseBinaryMulTestFloat32")
+{
+    ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float32>(gpuFsaDefaultBackends, BinaryOperation::Mul);
+}
+
+TEST_CASE("GpuFsaElementwiseBinaryMulTestFloat16")
+{
+    ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float16>(gpuFsaDefaultBackends, BinaryOperation::Mul);
+}
+
 // ElementwiseBinary Sub
 TEST_CASE("GpuFsaElementwiseBinarySubTestFloat32")
 {
diff --git a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
index dda4d1f..34af190 100644
--- a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
@@ -79,34 +79,25 @@
     REQUIRE(reasonIfNotSupported.find("NCHW not supported by this kernel") != std::string::npos);
 }
 
-TEST_CASE("IsLayerSupportedGpuFsaElementWiseBinaryAdd")
+TEST_CASE("IsLayerSupportedGpuFsaElementWiseBinary")
 {
     TensorInfo input0Info({ 2, 2 }, DataType::Float32);
     TensorInfo input1Info({ 2, 2 }, DataType::Float32);
     TensorInfo outputInfo({ 2, 2 }, DataType::Float32);
 
     ElementwiseBinaryDescriptor desc;
-    desc.m_Operation = BinaryOperation::Add;
-
-    GpuFsaLayerSupport supportChecker;
-    std::string reasonIfNotSupported;
-    auto supported = supportChecker.IsLayerSupported(LayerType::ElementwiseBinary,
-                                                     {input0Info, input1Info, outputInfo},
-                                                     desc,
-                                                     EmptyOptional(),
-                                                     EmptyOptional(),
-                                                     reasonIfNotSupported);
-    CHECK(supported);
-}
-
-TEST_CASE("IsLayerSupportedGpuFsaElementWiseBinarySub")
-{
-    TensorInfo input0Info({ 2, 2 }, DataType::Float32);
-    TensorInfo input1Info({ 2, 2 }, DataType::Float32);
-    TensorInfo outputInfo({ 2, 2 }, DataType::Float32);
-
-    ElementwiseBinaryDescriptor desc;
-    desc.m_Operation = BinaryOperation::Sub;
+    SUBCASE("Add")
+    {
+        desc.m_Operation = BinaryOperation::Add;
+    }
+    SUBCASE("Mul")
+    {
+        desc.m_Operation = BinaryOperation::Mul;
+    }
+    SUBCASE("Sub")
+    {
+        desc.m_Operation = BinaryOperation::Sub;
+    }
 
     GpuFsaLayerSupport supportChecker;
     std::string reasonIfNotSupported;
diff --git a/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp b/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
index aad3a0f..6ddb942 100644
--- a/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
@@ -173,7 +173,7 @@
                         &IsLayerOfType<OutputLayer>));
 }
 
-TEST_CASE("ElementwiseBinaryAddSupportedOptimizedNetwork")
+TEST_CASE("ElementwiseBinarySupportedOptimizedNetwork")
 {
     using namespace armnn;
 
@@ -196,55 +196,18 @@
     IConnectableLayer* input2 = network->AddInputLayer(1, "input1");
 
     ElementwiseBinaryDescriptor desc;
-    desc.m_Operation = BinaryOperation::Add;
-
-    IConnectableLayer* elementwiseBinaryLayer = network->AddElementwiseBinaryLayer(desc, "elementwiseBinary");
-    IConnectableLayer* output = network->AddOutputLayer(2, "output");
-
-    Connect(input1, elementwiseBinaryLayer, input1TensorInfo, 0, 0);
-    Connect(input2, elementwiseBinaryLayer, input2TensorInfo, 0, 1);
-    Connect(elementwiseBinaryLayer, output, outputTensorInfo, 0, 0);
-
-    std::vector<BackendId> backends = { "GpuFsa" };
-
-    OptimizerOptionsOpaque optimizedOptions;
-    IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optimizedOptions);
-    CHECK(optNet);
-
-    Graph& graph = GetGraphForTesting(optNet.get());
-
-    // Check graph layer sequence to ensure that the network has been replaced with a PreCompiledLayer
-    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
-                        &IsLayerOfType<InputLayer>,
-                        &IsLayerOfType<InputLayer>,
-                        &IsLayerOfType<PreCompiledLayer>,
-                        &IsLayerOfType<OutputLayer>));
-}
-
-TEST_CASE("ElementwiseBinarySubSupportedOptimizedNetwork")
-{
-    using namespace armnn;
-
-    const float qScale = 1.0f;
-    const int32_t qOffset = 0;
-
-    const TensorShape& input1Shape  = { 2, 2, 2 };
-    const TensorShape& input2Shape  = { 2, 2, 2 };
-    const TensorShape& outputShape  = { 2, 2, 2 };
-
-    TensorInfo input1TensorInfo(input1Shape, DataType::Float32, qScale, qOffset, true);
-    TensorInfo input2TensorInfo(input2Shape, DataType::Float32, qScale, qOffset, true);
-    TensorInfo outputTensorInfo(outputShape, DataType::Float32, qScale, qOffset);
-
-    IRuntime::CreationOptions options;
-    IRuntimePtr runtime(IRuntime::Create(options));
-    INetworkPtr network(INetwork::Create());
-
-    IConnectableLayer* input1 = network->AddInputLayer(0, "input0");
-    IConnectableLayer* input2 = network->AddInputLayer(1, "input1");
-
-    ElementwiseBinaryDescriptor desc;
-    desc.m_Operation = BinaryOperation::Sub;
+    SUBCASE("Add")
+    {
+        desc.m_Operation = BinaryOperation::Add;
+    }
+    SUBCASE("Mul")
+    {
+        desc.m_Operation = BinaryOperation::Mul;
+    }
+    SUBCASE("Sub")
+    {
+        desc.m_Operation = BinaryOperation::Sub;
+    }
 
     IConnectableLayer* elementwiseBinaryLayer = network->AddElementwiseBinaryLayer(desc, "elementwiseBinary");
     IConnectableLayer* output = network->AddOutputLayer(2, "output");