IVGCVSW-3808 Add ElementwiseBinaryLayer

!android-nn-driver:9329

 * Added ElementwiseBinaryLayer, a single layer that can represent all
   elementwise binary operations: Add, Div, Sub, Maximum, Mul and Minimum
   (see the sketch after this list).
 * Updated Delegate to use ElementwiseBinaryLayer instead of the Add,
   Div, Sub, Maximum, Mul and Minimum layers.
 * Updated Deserializer to use ElementwiseBinaryLayer instead of the Add,
   Div, Sub, Maximum, Mul and Minimum layers.
 * Updated OnnxParser to use ElementwiseBinaryLayer instead of the Add
   layer.
 * Updated TfLiteParser to use ElementwiseBinaryLayer instead of the Add,
   Div, Sub, Maximum, Mul and Minimum layers.
 * Updated CL and Neon tests to use ElementwiseBinaryLayer.
 * Updated CL and Neon Backend Specific Optimizations to accept
   ElementwiseBinaryLayers as well as Add, Div, Mul, Sub, Maximum and
   Minimum layers.
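
   As a minimal illustrative sketch (not part of this change) of migrating a
   caller from a per-operation layer to the unified layer; `net` is an
   armnn::INetworkPtr as in the tests below, and the names are illustrative:

       // Before: one dedicated layer type per operation:
       //     armnn::IConnectableLayer* addLayer = net->AddAdditionLayer("add");
       // After: a single layer type parameterised by BinaryOperation:
       armnn::IConnectableLayer* addLayer =
           net->AddElementwiseBinaryLayer(armnn::BinaryOperation::Add, "add");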

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I7cbb96b60eb01f0e2b57b0541016d48a08b86c75
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 509157a..77335d5 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -24,6 +24,7 @@
     DetectionPostProcessEndToEndTestImpl.hpp
     DynamicBackendTests.cpp
     DynamicBackendTests.hpp
+    ElementwiseBinaryEndToEndTestImpl.hpp
     ElementwiseUnaryEndToEndTestImpl.hpp
     EndToEndTestImpl.hpp
     FillEndToEndTestImpl.hpp
diff --git a/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp
new file mode 100644
index 0000000..6546a6a
--- /dev/null
+++ b/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp
@@ -0,0 +1,107 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "CommonTestUtils.hpp"
+
+#include <ResolveType.hpp>
+
+#include <armnn/Exceptions.hpp>
+#include <armnn/INetwork.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
+#include <doctest/doctest.h>
+
+#include <vector>
+
+namespace
+{
+
+template<armnn::DataType ArmnnTypeInput>
+INetworkPtr CreateElementwiseBinaryNetwork(const TensorShape& input1Shape,
+                                           const TensorShape& input2Shape,
+                                           const TensorShape& outputShape,
+                                           BinaryOperation operation,
+                                           const float qScale = 1.0f,
+                                           const int32_t qOffset = 0)
+{
+    using namespace armnn;
+
+    INetworkPtr net(INetwork::Create());
+
+    TensorInfo input1TensorInfo(input1Shape, ArmnnTypeInput, qScale, qOffset, true);
+    TensorInfo input2TensorInfo(input2Shape, ArmnnTypeInput, qScale, qOffset, true);
+    TensorInfo outputTensorInfo(outputShape, ArmnnTypeInput, qScale, qOffset);
+
+    IConnectableLayer* input1 = net->AddInputLayer(armnn::numeric_cast<LayerBindingId>(0));
+    IConnectableLayer* input2 = net->AddInputLayer(armnn::numeric_cast<LayerBindingId>(1));
+    IConnectableLayer* elementwiseBinaryLayer = net->AddElementwiseBinaryLayer(operation, "elementwiseBinary");
+    IConnectableLayer* output = net->AddOutputLayer(0, "output");
+
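+    // Connect(from, to, tensorInfo, fromIndex, toIndex): input1 feeds the binary
+    // layer's input slot 0 and input2 feeds input slot 1.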
+    Connect(input1, elementwiseBinaryLayer, input1TensorInfo, 0, 0);
+    Connect(input2, elementwiseBinaryLayer, input2TensorInfo, 0, 1);
+    Connect(elementwiseBinaryLayer, output, outputTensorInfo, 0, 0);
+
+    return net;
+}
+
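+// Runs a { 2, 2, 2, 2 } tensor against a broadcast { 1 } tensor for the given
+// BinaryOperation on the supplied backends and checks the output against the
+// reference values computed in the switch below.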
+template<armnn::DataType ArmnnInType,
+         typename TInput = armnn::ResolveType<ArmnnInType>>
+void ElementwiseBinarySimpleEndToEnd(const std::vector<BackendId>& backends,
+                                     BinaryOperation operation)
+{
+    using namespace armnn;
+
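+    // Quantized input types use a non-trivial scale/offset so that the
+    // quantization path is exercised; float types use the identity mapping.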
+    const float   qScale  = IsQuantizedType<TInput>() ? 0.25f : 1.0f;
+    const int32_t qOffset = IsQuantizedType<TInput>() ? 50    : 0;
+
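+    // The second input is a single element that is broadcast against every
+    // element of the first input.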
+    const TensorShape& input1Shape  = { 2, 2, 2, 2 };
+    const TensorShape& input2Shape  = { 1 };
+    const TensorShape& outputShape  = { 2, 2, 2, 2 };
+
+    // Builds up the structure of the network
+    INetworkPtr net = CreateElementwiseBinaryNetwork<ArmnnInType>(input1Shape, input2Shape, outputShape,
+                                                                  operation, qScale, qOffset);
+
+    CHECK(net);
+
+    const std::vector<float> input1({ 1, -1, 1, 1,  5, -5, 5, 5,  -3, 3, 3, 3,  4, 4, -4, 4 });
+
+    const std::vector<float> input2({ 2 });
+    std::vector<float> expectedOutput;
+    switch (operation)
+    {
+        case armnn::BinaryOperation::Add:
+            expectedOutput = { 3, 1, 3, 3,  7, -3, 7, 7,  -1, 5, 5, 5,  6, 6, -2, 6 };
+            break;
+        case armnn::BinaryOperation::Div:
+            expectedOutput = { 0.5f, -0.5f, 0.5f, 0.5f,  2.5f, -2.5f, 2.5f, 2.5f,  -1.5f, 1.5f, 1.5f, 1.5f,  2, 2, -2, 2 };
+            break;
+        case armnn::BinaryOperation::Maximum:
+            expectedOutput = { 2, 2, 2, 2,  5, 2, 5, 5,  2, 3, 3, 3,  4, 4, 2, 4 };
+            break;
+        case armnn::BinaryOperation::Minimum:
+            expectedOutput = { 1, -1, 1, 1,  2, -5, 2, 2,  -3, 2, 2, 2,  2, 2, -4, 2 };
+            break;
+        case armnn::BinaryOperation::Mul:
+            expectedOutput = { 2, -2, 2, 2,  10, -10, 10, 10,  -6, 6, 6, 6,  8, 8, -8, 8 };
+            break;
+        case armnn::BinaryOperation::Sub:
+            expectedOutput = { -1, -3, -1, -1,  3, -7, 3, 3,  -5, 1, 1, 1,  2, 2, -6, 2 };
+            break;
+        default:
+            throw armnn::InvalidArgumentException("Invalid elementwise binary operation");
+    }
+    // quantize data
+    std::vector<TInput> qInput1Data     = armnnUtils::QuantizedVector<TInput>(input1, qScale, qOffset);
+    std::vector<TInput> qInput2Data     = armnnUtils::QuantizedVector<TInput>(input2, qScale, qOffset);
+    std::vector<TInput> qExpectedOutput = armnnUtils::QuantizedVector<TInput>(expectedOutput, qScale, qOffset);
+
+    std::map<int, std::vector<TInput>> inputTensorData    = {{ 0, qInput1Data }, { 1, qInput2Data }};
+    std::map<int, std::vector<TInput>> expectedOutputData = {{ 0, qExpectedOutput }};
+
+    EndToEndLayerTestImpl<ArmnnInType, ArmnnInType>(std::move(net), inputTensorData, expectedOutputData, backends);
+}
+
+} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index fb7a027..5b95d3c 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -664,6 +664,8 @@
 
 DECLARE_LAYER_POLICY_2_PARAM(DetectionPostProcess)
 
+DECLARE_LAYER_POLICY_2_PARAM(ElementwiseBinary)
+
 DECLARE_LAYER_POLICY_2_PARAM(ElementwiseUnary)
 
 DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index cd865de..5e619df 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -21,7 +21,7 @@
 
     //Defines layers.
     auto input = net->AddInputLayer(0);
-    auto add = net->AddAdditionLayer();
+    auto add = net->AddElementwiseBinaryLayer(armnn::BinaryOperation::Add);
     auto output = net->AddOutputLayer(0);
 
     // Connects layers.
@@ -54,7 +54,7 @@
         "    edge [fontsize=8 fontcolor=\"blue\" fontname=\"arial-bold\"];\n"
         "    " << inputId << " [label=\"{Input|Guid : " << inputId << "\\lLayerType : Input\\l"
                              "BackendID : CpuRef\\l}\"];\n"
-        "    " << addId << " [label=\"{Addition|Guid : " << addId << "\\lLayerType : Addition\\l"
+        "    " << addId << " [label=\"{ElementwiseBinary|Guid : " << addId << "\\lLayerType : ElementwiseBinary\\l"
                            "BackendID : CpuRef\\l}\"];\n"
         "    " << outputId << " [label=\"{Output|Guid : " << outputId << "\\lLayerType : Output\\l"
                               "BackendID : CpuRef\\l}\"];\n"
@@ -187,7 +187,7 @@
     layer->GetOutputSlot(0).SetTensorInfo(desc);
 
     armnn::IConnectableLayer* prevLayer = layer;
-    layer = net->AddMultiplicationLayer("ml");
+    layer = net->AddElementwiseBinaryLayer(armnn::BinaryOperation::Mul, "ml");
 
     prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
     normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
@@ -258,7 +258,7 @@
     layer->GetOutputSlot(0).SetTensorInfo(desc);
 
     armnn::IConnectableLayer* prevLayer = layer;
-    layer = net->AddMultiplicationLayer("ml");
+    layer = net->AddElementwiseBinaryLayer(armnn::BinaryOperation::Mul, "ml");
 
     prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
     normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
diff --git a/src/backends/backendsCommon/test/mockBackend/MockImportLayerSupport.hpp b/src/backends/backendsCommon/test/mockBackend/MockImportLayerSupport.hpp
index 380ce4a..da4b7ab 100644
--- a/src/backends/backendsCommon/test/mockBackend/MockImportLayerSupport.hpp
+++ b/src/backends/backendsCommon/test/mockBackend/MockImportLayerSupport.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -16,7 +16,7 @@
 public:
     bool IsLayerSupported(const LayerType& type,
                           const std::vector<TensorInfo>& infos,
-                          const BaseDescriptor& /*descriptor*/,
+                          const BaseDescriptor& descriptor,
                           const Optional<LstmInputParamsInfo>& /*lstmParamsInfo*/,
                           const Optional<QuantizedLstmInputParamsInfo>& /*quantizedLstmParamsInfo*/,
                           Optional<std::string&> reasonIfUnsupported) const override
@@ -25,6 +25,11 @@
         {
             case LayerType::Addition:
                 return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+            case LayerType::ElementwiseBinary:
+            {
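+                // Downcast the BaseDescriptor to read the BinaryOperation; this mock
+                // backend only claims support for Add.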
+                auto elementwiseDesc = *(PolymorphicDowncast<const ElementwiseBinaryDescriptor*>(&descriptor));
+                return (elementwiseDesc.m_Operation == BinaryOperation::Add);
+            }
             case LayerType::Input:
                 return IsInputSupported(infos[0], reasonIfUnsupported);
             case LayerType::Output: