IVGCVSW-7550 GpuFsa Op: Add ElementwiseBinary Operator ADD

  * Added support for the GpuFsa ADD operator (usage sketch below)
  * Added tests for layer support, end-to-end execution, and optimization
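
A minimal usage sketch for reviewers: TryLowerAdd and the blob set-up
shown here are illustrative assumptions, not part of this patch; only
the two GpuFsaElementwiseBinaryAdd helpers come from the files below.

    #include <gpuFsa/GpuFsaBackend.hpp>
    #include "GpuFsaElementwiseBinaryAdd.hpp"

    namespace armnn
    {
    // Hypothetical driver: validate the ADD, then record it in the blob's
    // sketch. The blob is assumed to already own a live GpuWorkloadContext
    // and GpuWorkloadSketch, as the CreateOp helper dereferences both.
    bool TryLowerAdd(GpuFsaPreCompiledBlob* blob,
                     const TensorInfo& input0,
                     const TensorInfo& input1)
    {
        arm_compute::Status status = GpuFsaElementwiseBinaryAddValidate(input0, input1);
        if (status.error_code() != arm_compute::ErrorCode::OK)
        {
            return false; // Unsupported configuration: fall back to another backend.
        }
        // Throws BackendCapabilityException if the in-sketch validation fails.
        GpuFsaElementwiseBinaryAddCreateOp(blob, input0, input1);
        return true;
    }
    } // namespace armnn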

Signed-off-by: Tracy Narine <tracy.narine@arm.com>
Change-Id: Ie9328d269c5c0ff60a7e10133b728ac9265033af
diff --git a/src/backends/gpuFsa/layers/CMakeLists.txt b/src/backends/gpuFsa/layers/CMakeLists.txt
index c174c51..bba795e 100644
--- a/src/backends/gpuFsa/layers/CMakeLists.txt
+++ b/src/backends/gpuFsa/layers/CMakeLists.txt
@@ -8,6 +8,8 @@
         GpuFsaConvolution2d.hpp
         GpuFsaDepthwiseConvolution2d.cpp
         GpuFsaDepthwiseConvolution2d.hpp
+        GpuFsaElementwiseBinaryAdd.cpp
+        GpuFsaElementwiseBinaryAdd.hpp
     )
 
 add_library(armnnGpuFsaBackendLayerValidators OBJECT ${armnnGpuFsaBackendLayerValidators_sources})
diff --git a/src/backends/gpuFsa/layers/GpuFsaDepthwiseConvolution2d.cpp b/src/backends/gpuFsa/layers/GpuFsaDepthwiseConvolution2d.cpp
index 01a36f2..dd55d4d 100644
--- a/src/backends/gpuFsa/layers/GpuFsaDepthwiseConvolution2d.cpp
+++ b/src/backends/gpuFsa/layers/GpuFsaDepthwiseConvolution2d.cpp
@@ -18,7 +18,6 @@
 
 #include <arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h>
 #include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
-#include <src/dynamic_fusion/sketch/gpu/GpuWorkloadContextImpl.h>
 #include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.h>
 #include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
 
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.cpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.cpp
new file mode 100644
index 0000000..bc8b370
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.cpp
@@ -0,0 +1,86 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GpuFsaElementwiseBinaryAdd.hpp"
+
+#include <armnn/Types.hpp>
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <arm_compute/core/CL/CLKernelLibrary.h>
+
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+namespace armnn
+{
+
+arm_compute::Status GpuFsaElementwiseBinaryAddValidate(const TensorInfo& input0,
+                                                       const TensorInfo& input1)
+{
+    using namespace armcomputetensorutils;
+
+    // Create a new workload sketch, for validation purposes
+    auto compileCtx         = arm_compute::CLKernelLibrary::get().get_compile_context();
+    auto workloadContext    = GpuWorkloadContext(&compileCtx);
+    GpuWorkloadSketch sketch{ &workloadContext };
+
+    arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
+    arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
+
+    aclInput0Info.set_are_values_constant(input0.IsConstant());
+    aclInput1Info.set_are_values_constant(input1.IsConstant());
+
+    arm_compute::ITensorInfo*  inputInfo0 = workloadContext.create_tensor_info(aclInput0Info);
+    arm_compute::ITensorInfo*  inputInfo1 = workloadContext.create_tensor_info(aclInput1Info);
+
+    return GpuAdd::validate_op(sketch, inputInfo0, inputInfo1);
+}
+
+void GpuFsaElementwiseBinaryAddCreateOp(GpuFsaPreCompiledBlob* blob,
+                                        const TensorInfo& input0,
+                                        const TensorInfo& input1)
+{
+    using namespace armcomputetensorutils;
+
+    GpuWorkloadSketch* sketch           = blob->sketch.get();
+    GpuWorkloadContext* workloadContext = blob->workloadContext.get();
+    std::vector<arm_compute::ITensorInfo*> inputTensorInfos  = {};
+    std::vector<arm_compute::ITensorInfo*> outputTensorInfos = {};
+
+    arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
+    arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
+
+    aclInput0Info.set_are_values_constant(input0.IsConstant());
+    aclInput1Info.set_are_values_constant(input1.IsConstant());
+
+    inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput0Info));
+    inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput1Info));
+
+    // Validate the operator; throw if the ADD configuration is unsupported
+    arm_compute::Status aclStatus = GpuAdd::validate_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
+    const bool supported = aclStatus.error_code() == arm_compute::ErrorCode::OK;
+    if (!supported)
+    {
+        throw BackendCapabilityException("\"GpuFsa\" backend failed during elementwise binary add validation");
+    }
+
+    arm_compute::ITensorInfo* addOutputInfo =
+            GpuAdd::create_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
+
+    // Temporary fix until a fusing attempt is made for the GpuFsa backend and the Output layer workload is created.
+    outputTensorInfos.emplace_back(workloadContext->create_tensor_info());
+    GpuOutput::create_op(*sketch, addOutputInfo, outputTensorInfos[0]);
+
+    // Store the TensorInfos within the blob as unique_ptrs to be used later
+    blob->inputTensorInfos  = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(inputTensorInfos);
+    blob->outputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(outputTensorInfos);
+}
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.hpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.hpp
new file mode 100644
index 0000000..8221f0e
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.hpp
@@ -0,0 +1,26 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/Descriptors.hpp>
+#include <armnn/Tensor.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
+#include <gpuFsa/GpuFsaBackend.hpp>
+
+namespace armnn
+{
+
+    using namespace arm_compute::experimental::dynamic_fusion;
+
+    arm_compute::Status GpuFsaElementwiseBinaryAddValidate(const TensorInfo& input0,
+                                                           const TensorInfo& input1);
+
+    void GpuFsaElementwiseBinaryAddCreateOp(GpuFsaPreCompiledBlob* blob,
+                                            const TensorInfo& input0,
+                                            const TensorInfo& input1);
+
+} // namespace armnn
\ No newline at end of file