IVGCVSW-7550  GpuFsa Op: Add ElementWiseBinary Operator ADD

  * Added support for the GpuFsa Add operator (see the usage sketch below)
  * Added tests for layer support, end-to-end execution and optimization

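For reference, a minimal sketch of how the new Add path could be driven through the
public ArmNN API. It is illustrative only and not part of this patch: the 2x2x2
Float32 shapes mirror the test below, the data values and the standalone main() are
made up, and it assumes ArmNN was built with the GpuFsa backend enabled.

    #include <armnn/ArmNN.hpp>
    #include <vector>

    int main()
    {
        using namespace armnn;

        // Sketch only: assumes the GpuFsa backend is available at runtime.
        // Build a network containing a single ElementwiseBinary(Add) layer.
        INetworkPtr net = INetwork::Create();
        IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
        IConnectableLayer* input1 = net->AddInputLayer(1, "input1");

        ElementwiseBinaryDescriptor desc;
        desc.m_Operation = BinaryOperation::Add;
        IConnectableLayer* add    = net->AddElementwiseBinaryLayer(desc, "add");
        IConnectableLayer* output = net->AddOutputLayer(0, "output");

        TensorInfo info({ 2, 2, 2 }, DataType::Float32);
        input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
        input1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
        add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
        input0->GetOutputSlot(0).SetTensorInfo(info);
        input1->GetOutputSlot(0).SetTensorInfo(info);
        add->GetOutputSlot(0).SetTensorInfo(info);

        // Optimize for the GpuFsa backend and load the result onto the runtime.
        IRuntime::CreationOptions options;
        IRuntimePtr runtime = IRuntime::Create(options);
        std::vector<BackendId> backends = { "GpuFsa" };
        IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
        NetworkId netId = 0;
        runtime->LoadNetwork(netId, std::move(optNet));

        // Run a single inference; inputs passed as ConstTensor must be marked constant.
        std::vector<float> in0Data(8, 1.0f);
        std::vector<float> in1Data(8, 2.0f);
        std::vector<float> outData(8, 0.0f);

        TensorInfo inputInfo0 = runtime->GetInputTensorInfo(netId, 0);
        TensorInfo inputInfo1 = runtime->GetInputTensorInfo(netId, 1);
        inputInfo0.SetConstant(true);
        inputInfo1.SetConstant(true);

        InputTensors  inputs  = { { 0, ConstTensor(inputInfo0, in0Data.data()) },
                                  { 1, ConstTensor(inputInfo1, in1Data.data()) } };
        OutputTensors outputs = { { 0, Tensor(runtime->GetOutputTensorInfo(netId, 0),
                                              outData.data()) } };

        runtime->EnqueueWorkload(netId, inputs, outputs);
        // outData now holds the element-wise sum (all 3.0f for these inputs).
        return 0;
    }
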
Signed-off-by: Tracy Narine <tracy.narine@arm.com>
Change-Id: Ie9328d269c5c0ff60a7e10133b728ac9265033af
diff --git a/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp b/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
index 7e094ce..673a52a 100644
--- a/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -134,4 +134,52 @@
                         &IsLayerOfType<OutputLayer>));
 }
 
+TEST_CASE("ElementwiseBinaryAddSupportedOptimizedNetwork")
+{
+    using namespace armnn;
+
+    const float qScale = 1.0f;
+    const int32_t qOffset = 0;
+
+    const TensorShape& input1Shape  = { 2, 2, 2 };
+    const TensorShape& input2Shape  = { 2, 2, 2 };
+    const TensorShape& outputShape  = { 2, 2, 2 };
+
+    TensorInfo input1TensorInfo(input1Shape, DataType::Float32, qScale, qOffset, true);
+    TensorInfo input2TensorInfo(input2Shape, DataType::Float32, qScale, qOffset, true);
+    TensorInfo outputTensorInfo(outputShape, DataType::Float32, qScale, qOffset);
+
+    IRuntime::CreationOptions options;
+    IRuntimePtr runtime(IRuntime::Create(options));
+    INetworkPtr network(INetwork::Create());
+
+    IConnectableLayer* input1 = network->AddInputLayer(0, "input0");
+    IConnectableLayer* input2 = network->AddInputLayer(1, "input1");
+
+    ElementwiseBinaryDescriptor desc;
+    desc.m_Operation = BinaryOperation::Add;
+
+    IConnectableLayer* elementwiseBinaryLayer = network->AddElementwiseBinaryLayer(desc, "elementwiseBinary");
+    IConnectableLayer* output = network->AddOutputLayer(2, "output");
+
+    Connect(input1, elementwiseBinaryLayer, input1TensorInfo, 0, 0);
+    Connect(input2, elementwiseBinaryLayer, input2TensorInfo, 0, 1);
+    Connect(elementwiseBinaryLayer, output, outputTensorInfo, 0, 0);
+
+    std::vector<BackendId> backends = { "GpuFsa" };
+
+    OptimizerOptionsOpaque optimizedOptions;
+    IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    CHECK(optNet);
+
+    Graph& graph = GetGraphForTesting(optNet.get());
+
+    // Check graph layer sequence to ensure that the network has been replaced with a PreCompiledLayer
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
+                        &IsLayerOfType<InputLayer>,
+                        &IsLayerOfType<InputLayer>,
+                        &IsLayerOfType<PreCompiledLayer>,
+                        &IsLayerOfType<OutputLayer>));
+}
+
 }
\ No newline at end of file