IVGCVSW-3808 Add ElementwiseBinaryLayer

!android-nn-driver:9329

 * Added ElementwiseBinaryLayer, which can represent all ElementwiseBinary
   operations: Add, Div, Sub, Maximum, Mul and Minimum (see the usage
   sketch below).
 * Updated Delegate to use ElementwiseBinaryLayer instead of the Add,
   Div, Sub, Maximum, Mul and Minimum layers.
 * Updated Deserializer to use ElementwiseBinaryLayer instead of the Add,
   Div, Sub, Maximum, Mul and Minimum layers.
 * Updated OnnxParser to use ElementwiseBinaryLayer instead of the Add
   layer.
 * Updated TfLiteParser to use ElementwiseBinaryLayer instead of the Add,
   Div, Sub, Maximum, Mul and Minimum layers.
 * Updated CL and Neon tests to use ElementwiseBinaryLayer.
 * Updated CL and Neon Backend Specific Optimizations to accept
   ElementwiseBinaryLayers as well as the Add, Div, Mul, Sub, Maximum and
   Minimum layers.
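
Illustrative only, not part of this patch: a minimal sketch of how a
caller might use the new API, based on the AddElementwiseBinaryLayer,
ElementwiseBinaryDescriptor and BinaryOperation additions below. The
helper name, layer names and tensor shape are placeholders.

    #include <armnn/ArmNN.hpp>

    // Builds a two-input network that adds its inputs elementwise using
    // the new ElementwiseBinaryLayer instead of the old AdditionLayer.
    armnn::INetworkPtr BuildAddNetwork()
    {
        using namespace armnn;

        INetworkPtr network = INetwork::Create();

        IConnectableLayer* input0 = network->AddInputLayer(0, "input0");
        IConnectableLayer* input1 = network->AddInputLayer(1, "input1");

        // The descriptor selects the operation; BinaryOperation covers
        // Add, Div, Maximum, Minimum, Mul and Sub.
        ElementwiseBinaryDescriptor desc(BinaryOperation::Add);
        IConnectableLayer* add = network->AddElementwiseBinaryLayer(desc, "add");

        IConnectableLayer* output = network->AddOutputLayer(0, "output");

        input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
        input1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
        add->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        TensorInfo info(TensorShape({ 2, 4 }), DataType::Float32);
        input0->GetOutputSlot(0).SetTensorInfo(info);
        input1->GetOutputSlot(0).SetTensorInfo(info);
        add->GetOutputSlot(0).SetTensorInfo(info);

        return network;
    }

The inputs may also have broadcast-compatible shapes; the new
ElementwiseBinaryLayer::InferOutputShapes takes the per-dimension
maximum of the two input shapes, matching the layers it replaces.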

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I7cbb96b60eb01f0e2b57b0541016d48a08b86c75
diff --git a/Android.mk b/Android.mk
index 0e4e6f9..884624e 100644
--- a/Android.mk
+++ b/Android.mk
@@ -1,5 +1,5 @@
 #
-# Copyright © 2017 ARM Ltd. All rights reserved.
+# Copyright © 2017-2023 ARM Ltd and Contributors. All rights reserved.
 # SPDX-License-Identifier: MIT
 #
 
@@ -227,6 +227,7 @@
         src/armnn/layers/DetectionPostProcessLayer.cpp \
         src/armnn/layers/DivisionLayer.cpp \
         src/armnn/layers/ElementwiseBaseLayer.cpp \
+        src/armnn/layers/ElementwiseBinaryLayer.cpp \
         src/armnn/layers/ElementwiseUnaryLayer.cpp \
         src/armnn/layers/FakeQuantizationLayer.cpp \
         src/armnn/layers/FillLayer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3b788cd..be6c4c6 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,6 +1,6 @@
 #
-# Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
-# Copyright 2020 NXP
+# Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2020 NXP
 # SPDX-License-Identifier: MIT
 #
 cmake_minimum_required (VERSION 3.0.2) # 3.0.2 required for return() statement used in AddDllCopyCommands.cmake
@@ -222,6 +222,8 @@
     src/armnn/layers/DetectionPostProcessLayer.cpp
     src/armnn/layers/ElementwiseBaseLayer.hpp
     src/armnn/layers/ElementwiseBaseLayer.cpp
+    src/armnn/layers/ElementwiseBinaryLayer.hpp
+    src/armnn/layers/ElementwiseBinaryLayer.cpp
     src/armnn/layers/ElementwiseUnaryLayer.hpp
     src/armnn/layers/ElementwiseUnaryLayer.cpp
     src/armnn/layers/FakeQuantizationLayer.hpp
diff --git a/delegate/src/ElementwiseBinary.hpp b/delegate/src/ElementwiseBinary.hpp
index 52c6b24..fa9021b 100644
--- a/delegate/src/ElementwiseBinary.hpp
+++ b/delegate/src/ElementwiseBinary.hpp
@@ -27,15 +27,17 @@
     bool isSupported = false;
     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
     {
+        std::vector<armnn::TensorInfo> infos { inputInfo1, inputInfo2, outputInfo };
         FORWARD_LAYER_SUPPORT_FUNC("ADD",
                                    tfLiteContext,
-                                   IsAdditionSupported,
+                                   IsElementwiseBinarySupported,
                                    delegateData.m_Backends,
                                    isSupported,
                                    armnn::BackendId(),
                                    inputInfo1,
                                    inputInfo2,
-                                   outputTensorInfo);
+                                   outputInfo,
+                                   armnn::BinaryOperation::Add);
     };
 
     validateFunc(outputInfo, isSupported);
@@ -54,13 +56,14 @@
     {
         FORWARD_LAYER_SUPPORT_FUNC("DIV",
                                    tfLiteContext,
-                                   IsDivisionSupported,
+                                   IsElementwiseBinarySupported,
                                    delegateData.m_Backends,
                                    isSupported,
                                    armnn::BackendId(),
                                    inputInfo1,
                                    inputInfo2,
-                                   outputTensorInfo);
+                                   outputTensorInfo,
+                                   armnn::BinaryOperation::Div);
     };
 
     validateFunc(outputInfo, isSupported);
@@ -107,13 +110,14 @@
     {
         FORWARD_LAYER_SUPPORT_FUNC("MAXIMUM",
                                    tfLiteContext,
-                                   IsMaximumSupported,
+                                   IsElementwiseBinarySupported,
                                    delegateData.m_Backends,
                                    isSupported,
                                    armnn::BackendId(),
                                    inputInfo1,
                                    inputInfo2,
-                                   outputTensorInfo);
+                                   outputTensorInfo,
+                                   armnn::BinaryOperation::Maximum);
     };
 
     validateFunc(outputInfo, isSupported);
@@ -131,13 +135,14 @@
     {
         FORWARD_LAYER_SUPPORT_FUNC("MINIMUM",
                                    tfLiteContext,
-                                   IsMinimumSupported,
+                                   IsElementwiseBinarySupported,
                                    delegateData.m_Backends,
                                    isSupported,
                                    armnn::BackendId(),
                                    inputInfo1,
                                    inputInfo2,
-                                   outputTensorInfo);
+                                   outputTensorInfo,
+                                   armnn::BinaryOperation::Minimum);
     };
 
     validateFunc(outputInfo, isSupported);
@@ -155,13 +160,14 @@
     {
         FORWARD_LAYER_SUPPORT_FUNC("MUL",
                                    tfLiteContext,
-                                   IsMultiplicationSupported,
+                                   IsElementwiseBinarySupported,
                                    delegateData.m_Backends,
                                    isSupported,
                                    armnn::BackendId(),
                                    inputInfo1,
                                    inputInfo2,
-                                   outputTensorInfo);
+                                   outputTensorInfo,
+                                   armnn::BinaryOperation::Mul);
     };
 
     validateFunc(outputInfo, isSupported);
@@ -179,13 +185,14 @@
     {
         FORWARD_LAYER_SUPPORT_FUNC("SUB",
                                    tfLiteContext,
-                                   IsSubtractionSupported,
+                                   IsElementwiseBinarySupported,
                                    delegateData.m_Backends,
                                    isSupported,
                                    armnn::BackendId(),
                                    inputInfo1,
                                    inputInfo2,
-                                   outputTensorInfo);
+                                   outputTensorInfo,
+                                   armnn::BinaryOperation::Sub);
     };
 
     validateFunc(outputInfo, isSupported);
@@ -196,7 +203,8 @@
     DelegateData& delegateData,
     const armnn::TensorInfo& outputTensorInfo)
 {
-    armnn::IConnectableLayer* divisionLayer = delegateData.m_Network->AddDivisionLayer();
+    armnn::IConnectableLayer* divisionLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
+            armnn::BinaryOperation::Div);
     // if the output of the div is Signed32 the Floor layer is not required
     if (armnn::DataType::Signed32 == outputTensorInfo.GetDataType())
     {
@@ -330,10 +338,12 @@
     switch(elementwiseBinaryOperatorCode)
     {
         case kTfLiteBuiltinAdd:
-            elementwiseBinaryLayer = delegateData.m_Network->AddAdditionLayer();
+            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
+                    armnn::BinaryOperation::Add);
             break;
         case kTfLiteBuiltinDiv:
-            elementwiseBinaryLayer = delegateData.m_Network->AddDivisionLayer();
+            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
+                    armnn::BinaryOperation::Div);
             break;
         case kTfLiteBuiltinFloorDiv:
             {
@@ -343,16 +353,20 @@
             }
             break;
         case kTfLiteBuiltinMaximum:
-            elementwiseBinaryLayer = delegateData.m_Network->AddMaximumLayer();
+            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
+                    armnn::BinaryOperation::Maximum);
             break;
         case kTfLiteBuiltinMinimum:
-            elementwiseBinaryLayer = delegateData.m_Network->AddMinimumLayer();
+            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
+                    armnn::BinaryOperation::Minimum);
             break;
         case kTfLiteBuiltinMul:
-            elementwiseBinaryLayer = delegateData.m_Network->AddMultiplicationLayer();
+            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
+                    armnn::BinaryOperation::Mul);
             break;
         case kTfLiteBuiltinSub:
-            elementwiseBinaryLayer = delegateData.m_Network->AddSubtractionLayer();
+            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
+                    armnn::BinaryOperation::Sub);
             break;
         default:
             return kTfLiteError;
diff --git a/include/armnn/BackendHelper.hpp b/include/armnn/BackendHelper.hpp
index cf60b01..85aabe0 100644
--- a/include/armnn/BackendHelper.hpp
+++ b/include/armnn/BackendHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -161,6 +161,12 @@
                              const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
+    bool IsElementwiseBinarySupported(const TensorInfo& input0,
+                                      const TensorInfo& input1,
+                                      const TensorInfo& output,
+                                      const ElementwiseBinaryDescriptor& descriptor,
+                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
     bool IsElementwiseUnarySupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      const ElementwiseUnaryDescriptor& descriptor,
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 7c6942c..9ff894f 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -105,6 +105,26 @@
     ComparisonOperation m_Operation;
 };
 
+/// A ElementwiseBinaryDescriptor for the ElementwiseBinaryLayer
+struct ElementwiseBinaryDescriptor : BaseDescriptor
+{
+    ElementwiseBinaryDescriptor()
+            : ElementwiseBinaryDescriptor(BinaryOperation::Add)
+    {}
+
+    ElementwiseBinaryDescriptor(BinaryOperation operation)
+            : m_Operation(operation)
+    {}
+
+    bool operator ==(const ElementwiseBinaryDescriptor &rhs) const
+    {
+        return m_Operation == rhs.m_Operation;
+    }
+
+    /// Specifies the elementwiseBinary operation to execute
+    BinaryOperation m_Operation;
+};
+
 /// A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer
 struct ElementwiseUnaryDescriptor : BaseDescriptor
 {
diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp
index 0883350..2c25a49 100644
--- a/include/armnn/DescriptorsFwd.hpp
+++ b/include/armnn/DescriptorsFwd.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -20,6 +20,7 @@
 struct Convolution3dDescriptor;
 struct DepthwiseConvolution2dDescriptor;
 struct DetectionPostProcessDescriptor;
+struct ElementwiseBinaryDescriptor;
 struct ElementwiseUnaryDescriptor;
 struct FakeQuantizationDescriptor;
 struct FillDescriptor;
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index c944d09..4eac0cf 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -349,9 +349,16 @@
         const ConstTensor& anchors,
         const char* name = nullptr);
 
+    /// Add an ElementwiseBinary layer to the network.
+    /// @param name - Optional name for the layer.
+    /// @param desc - Descriptor for the elementwiseBinary operation.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddElementwiseBinaryLayer(const ElementwiseBinaryDescriptor& elementwiseBinaryDescriptor,
+                                                 const char* name = nullptr);
+
     /// Add an ElementwiseUnary layer to the network.
     /// @param name - Optional name for the layer.
-    /// @param desc - Descriptor for the elementwiseUnary operation.
+    /// @param desc - Descriptor for the elementwiseUnary operations.
     /// @return - Interface for configuring the layer.
     IConnectableLayer* AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
                                                 const char* name = nullptr);
diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index 4dc613b..3c64e82 100644
--- a/include/armnn/Types.hpp
+++ b/include/armnn/Types.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -133,6 +133,16 @@
     Sin        = 7
 };
 
+enum class BinaryOperation
+{
+    Add     = 0,
+    Div     = 1,
+    Maximum = 2,
+    Minimum = 3,
+    Mul     = 4,
+    Sub     = 5
+};
+
 enum class PoolingAlgorithm
 {
     Max     = 0,
@@ -385,6 +395,7 @@
 
 /// This list uses X macro technique.
 /// See https://en.wikipedia.org/wiki/X_Macro for more info
+// New layers should be added at last position to minimize instability.
 #define LIST_OF_LAYER_TYPE \
     X(Activation) \
     X(Addition) \
@@ -458,8 +469,9 @@
     X(Pooling3d) \
     X(GatherNd) \
     X(BatchMatMul) \
+    X(ElementwiseBinary) \
 
-// New layers should be added at last to minimize instability.
+// New layers should be added at last position to minimize instability.
 
 /// When adding a new layer, adapt also the LastLayer enum value in the
 /// enum class LayerType below
@@ -469,7 +481,7 @@
     LIST_OF_LAYER_TYPE
 #undef X
     FirstLayer = Activation,
-    LastLayer = BatchMatMul
+    LastLayer = ElementwiseBinary
 };
 
 const char* GetLayerTypeAsCString(LayerType type);
diff --git a/include/armnn/TypesUtils.hpp b/include/armnn/TypesUtils.hpp
index 4dd5360..fbe9e8f 100644
--- a/include/armnn/TypesUtils.hpp
+++ b/include/armnn/TypesUtils.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -72,6 +72,20 @@
     }
 }
 
+constexpr char const* GetBinaryOperationAsCString(BinaryOperation operation)
+{
+    switch (operation)
+    {
+        case BinaryOperation::Add:      return "Add";
+        case BinaryOperation::Div:      return "Div";
+        case BinaryOperation::Maximum:  return "Maximum";
+        case BinaryOperation::Minimum:  return "Minimum";
+        case BinaryOperation::Mul:      return "Mul";
+        case BinaryOperation::Sub:      return "Sub";
+        default:                        return "Unknown";
+    }
+}
+
 constexpr char const* GetUnaryOperationAsCString(UnaryOperation operation)
 {
     switch (operation)
diff --git a/include/armnn/backends/WorkloadData.hpp b/include/armnn/backends/WorkloadData.hpp
index 7a0a765..2abd267 100644
--- a/include/armnn/backends/WorkloadData.hpp
+++ b/include/armnn/backends/WorkloadData.hpp
@@ -663,6 +663,11 @@
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+struct ElementwiseBinaryQueueDescriptor : QueueDescriptorWithParameters<ElementwiseBinaryDescriptor>
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 struct ElementwiseUnaryQueueDescriptor : QueueDescriptorWithParameters<ElementwiseUnaryDescriptor>
 {
     void Validate(const WorkloadInfo& workloadInfo) const;
diff --git a/include/armnnTestUtils/MockBackend.hpp b/include/armnnTestUtils/MockBackend.hpp
index e5378bf..05df1b4 100644
--- a/include/armnnTestUtils/MockBackend.hpp
+++ b/include/armnnTestUtils/MockBackend.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -320,6 +320,11 @@
                                                     reasonIfUnsupported);
                 }
             }
+            case LayerType::ElementwiseBinary:
+            {
+                auto elementwiseDesc = *(PolymorphicDowncast<const ElementwiseBinaryDescriptor*>(&descriptor));
+                return (elementwiseDesc.m_Operation == BinaryOperation::Add);
+            }
             default:
                 return false;
         }
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index af38ce8..580c52c 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -600,6 +600,22 @@
                                             reasonIfUnsupported);
 }
 
+bool LayerSupportHandle::IsElementwiseBinarySupported(const TensorInfo &input0,
+                                                      const TensorInfo &input1,
+                                                      const TensorInfo &output,
+                                                      const ElementwiseBinaryDescriptor &descriptor,
+                                                      Optional<std::string &> reasonIfUnsupported)
+{
+    TensorInfos infos{input0, input1, output};
+
+    return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseBinary,
+                                            infos,
+                                            descriptor,
+                                            EmptyOptional(),
+                                            EmptyOptional(),
+                                            reasonIfUnsupported);
+}
+
 bool LayerSupportHandle::IsElementwiseUnarySupported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      const ElementwiseUnaryDescriptor& descriptor,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 43862d5..f634272 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -27,6 +27,7 @@
 #include "layers/DequantizeLayer.hpp"
 #include "layers/DetectionPostProcessLayer.hpp"
 #include "layers/DivisionLayer.hpp"
+#include "layers/ElementwiseBinaryLayer.hpp"
 #include "layers/ElementwiseUnaryLayer.hpp"
 #include "layers/FakeQuantizationLayer.hpp"
 #include "layers/FillLayer.hpp"
@@ -127,6 +128,7 @@
 DECLARE_LAYER(Dequantize)
 DECLARE_LAYER(DetectionPostProcess)
 DECLARE_LAYER(Division)
+DECLARE_LAYER(ElementwiseBinary)
 DECLARE_LAYER(ElementwiseUnary)
 DECLARE_LAYER(FakeQuantization)
 DECLARE_LAYER(Fill)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 08d3280..9ebb67b 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017, 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -20,7 +20,6 @@
 #include <backendsCommon/TensorHandleFactoryRegistry.hpp>
 
 #include <armnn/Exceptions.hpp>
-#include <armnn/Utils.hpp>
 #include <armnn/TypesUtils.hpp>
 #include <armnn/BackendRegistry.hpp>
 #include <armnn/Logging.hpp>
@@ -36,10 +35,8 @@
 
 #include <fcntl.h>
 #include <algorithm>
-#include <fstream>
 #include <memory>
 #include <vector>
-#include <algorithm>
 
 namespace armnn
 {
@@ -58,7 +55,6 @@
     return pNetworkImpl->AddInputLayer(id, name);
 }
 
-
 IConnectableLayer* INetwork::AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
                                                const char* name)
 {
@@ -126,6 +122,11 @@
     return pNetworkImpl->AddDetectionPostProcessLayer(descriptor, anchors, name);
 }
 
+IConnectableLayer* INetwork::AddElementwiseBinaryLayer(const ElementwiseBinaryDescriptor& elementwiseBinaryDescriptor,
+                                                       const char* name)
+{
+    return pNetworkImpl->AddElementwiseBinaryLayer(elementwiseBinaryDescriptor, name);
+}
 
 IConnectableLayer* INetwork::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
                                                       const char* name)
@@ -133,7 +134,6 @@
     return pNetworkImpl->AddElementwiseUnaryLayer(elementwiseUnaryDescriptor, name);
 }
 
-
 IConnectableLayer* INetwork::AddFillLayer(const FillDescriptor& fillDescriptor,
                                           const char* name)
 {
@@ -1853,6 +1853,12 @@
     return m_Graph->AddLayer<ComparisonLayer>(comparisonDescriptor, name);
 }
 
+IConnectableLayer* NetworkImpl::AddElementwiseBinaryLayer(const ElementwiseBinaryDescriptor& elementwiseBinaryDesc,
+                                                          const char* name)
+{
+    return m_Graph->AddLayer<ElementwiseBinaryLayer>(elementwiseBinaryDesc, name);
+}
+
 IConnectableLayer* NetworkImpl::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
                                                      const char* name)
 {
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index a37a4be..03642ce 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -95,6 +95,9 @@
 
     IConnectableLayer* AddDivisionLayer(const char* name = nullptr);
 
+    IConnectableLayer* AddElementwiseBinaryLayer(const ElementwiseBinaryDescriptor& elementwiseBinaryDescriptor,
+                                                 const char* name = nullptr);
+
     IConnectableLayer* AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
                                                 const char* name = nullptr);
 
diff --git a/src/armnn/layers/ElementwiseBinaryLayer.cpp b/src/armnn/layers/ElementwiseBinaryLayer.cpp
new file mode 100644
index 0000000..ae1813f
--- /dev/null
+++ b/src/armnn/layers/ElementwiseBinaryLayer.cpp
@@ -0,0 +1,89 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ElementwiseBinaryLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+namespace armnn
+{
+
+ElementwiseBinaryLayer::ElementwiseBinaryLayer(const ElementwiseBinaryDescriptor& param, const char* name)
+    : LayerWithParameters(2, 1, LayerType::ElementwiseBinary, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> ElementwiseBinaryLayer::CreateWorkload(const IWorkloadFactory& factory) const
+{
+    ElementwiseBinaryQueueDescriptor descriptor;
+    SetAdditionalInfo(descriptor);
+
+    return factory.CreateWorkload(LayerType::ElementwiseBinary, descriptor, PrepInfoAndDesc(descriptor));
+}
+
+ElementwiseBinaryLayer* ElementwiseBinaryLayer::Clone(Graph& graph) const
+{
+    return CloneBase<ElementwiseBinaryLayer>(graph, m_Param, GetName());
+}
+
+std::vector<TensorShape> ElementwiseBinaryLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+    ARMNN_ASSERT(inputShapes.size() == 2);
+    TensorShape input0 = inputShapes[0];
+    TensorShape input1 = inputShapes[1];
+
+    if (inputShapes[0].GetNumDimensions() < inputShapes[1].GetNumDimensions())
+    {
+        input1 = inputShapes[0];
+        input0 = inputShapes[1];
+    }
+
+    unsigned int numDims     = input0.GetNumDimensions();
+    unsigned int shiftedDims = input0.GetNumDimensions() - input1.GetNumDimensions();
+
+    // Get the max of the inputs.
+    std::vector<unsigned int> dims(numDims);
+    for (unsigned int i = shiftedDims; i < numDims; i++)
+    {
+        unsigned int dim0 = input0[i];
+        unsigned int dim1 = input1[i - shiftedDims];
+
+        // Validate inputs are broadcast compatible.
+        ARMNN_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
+                         "Dimensions should either match or one should be of size 1.");
+
+        dims[i] = std::max(dim0, dim1);
+    }
+
+    // Fill in the rest of the shifted dimensions.
+    for (unsigned int i = 0; i < shiftedDims; i++)
+    {
+        dims[i] = input0[i];
+    }
+
+    return std::vector<TensorShape>({ TensorShape(numDims, dims.data()) });
+}
+
+void ElementwiseBinaryLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(2, CHECK_LOCATION());
+
+    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
+
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
+
+    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+                                              GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape() });
+
+    ARMNN_ASSERT(inferredShapes.size() == 1);
+
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, GetLayerTypeAsCString(GetType()));
+}
+
+void ElementwiseBinaryLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
+}
+} // namespace armnn
diff --git a/src/armnn/layers/ElementwiseBinaryLayer.hpp b/src/armnn/layers/ElementwiseBinaryLayer.hpp
new file mode 100644
index 0000000..78e3f41
--- /dev/null
+++ b/src/armnn/layers/ElementwiseBinaryLayer.hpp
@@ -0,0 +1,48 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents an elementwiseBinary operation.
+class ElementwiseBinaryLayer : public LayerWithParameters<ElementwiseBinaryDescriptor>
+{
+public:
+    /// Makes a workload for the elementwiseBinary type
+    /// @param [in] graph The graph where this layer can be found
+    /// @param [in] factory The workload factory which will create the workload
+    /// @return A pointer to the created workload, or nullptr if not created
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer
+    /// @param [in] graph The graph into which this layer is being cloned
+    ElementwiseBinaryLayer* Clone(Graph& graph) const override;
+
+    /// Infers the output shape from the two input shapes, applying broadcast rules.
+    /// @param [in] inputShapes The input shapes the layer has.
+    /// @return A vector containing the inferred output shape.
+    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+    /// Check if the input tensor shape(s) will lead to a valid configuration
+    /// of @ref ElementwiseBinaryLayer
+    void ValidateTensorShapesFromInputs() override;
+
+    void ExecuteStrategy(IStrategy& strategy) const override;
+
+protected:
+    /// Constructor to create an ElementwiseBinaryLayer
+    /// @param [in] param ElementwiseBinaryDescriptor to configure the ElementwiseBinaryLayer
+    /// @param [in] name Optional name for the layer
+    ElementwiseBinaryLayer(const ElementwiseBinaryDescriptor& param, const char* name);
+
+    /// Default destructor
+    ~ElementwiseBinaryLayer() = default;
+};
+
+} // namespace armnn
diff --git a/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp b/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp
index b9e8584..dbde72b 100644
--- a/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp
+++ b/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -18,7 +18,7 @@
 static const std::set<armnn::LayerType> broadcastOps{ LayerType::Addition,       LayerType::Division,
                                                       LayerType::Maximum,        LayerType::Minimum,
                                                       LayerType::Multiplication, LayerType::Prelu,
-                                                      LayerType::Subtraction };
+                                                      LayerType::Subtraction,    LayerType::ElementwiseBinary };
 
 class AddBroadcastReshapeLayerImpl
 {
diff --git a/src/armnn/optimizations/MovePermuteUp.hpp b/src/armnn/optimizations/MovePermuteUp.hpp
index ae8a28c..19078b3 100644
--- a/src/armnn/optimizations/MovePermuteUp.hpp
+++ b/src/armnn/optimizations/MovePermuteUp.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2018,2020,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -68,6 +68,12 @@
             case LayerType::MemCopy:
             case LayerType::Multiplication:
                 return true;
+            case LayerType::ElementwiseBinary:
+            {
+                auto descriptor = PolymorphicDowncast<const ElementwiseBinaryDescriptor*>(&base.GetParameters());
+                return (descriptor->m_Operation == BinaryOperation::Add ||
+                        descriptor->m_Operation == BinaryOperation::Mul);
+            }
             default:
                 return false;
         }
diff --git a/src/armnn/optimizations/MoveTransposeUp.hpp b/src/armnn/optimizations/MoveTransposeUp.hpp
index 999a4eb..40f6b9c 100644
--- a/src/armnn/optimizations/MoveTransposeUp.hpp
+++ b/src/armnn/optimizations/MoveTransposeUp.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -68,6 +68,12 @@
             case LayerType::MemCopy:
             case LayerType::Multiplication:
                 return true;
+            case LayerType::ElementwiseBinary:
+            {
+                auto descriptor = PolymorphicDowncast<const ElementwiseBinaryDescriptor*>(&base.GetParameters());
+                return (descriptor->m_Operation == BinaryOperation::Add ||
+                        descriptor->m_Operation == BinaryOperation::Mul);
+            }
             default:
                 return false;
         }
diff --git a/src/armnn/test/GraphTests.cpp b/src/armnn/test/GraphTests.cpp
index eea7ae8..b1b1a84 100644
--- a/src/armnn/test/GraphTests.cpp
+++ b/src/armnn/test/GraphTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include <GraphUtils.hpp>
@@ -36,7 +36,7 @@
 
     CHECK_NOTHROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
     CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
-    CHECK_NOTHROW(graph.AddLayer<armnn::AdditionLayer>("layerC"));
+    CHECK_NOTHROW(graph.AddLayer<armnn::ElementwiseBinaryLayer>(armnn::BinaryOperation::Add, "layerC"));
     CHECK_NOTHROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
     CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerD"));
     CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerE"));
@@ -82,7 +82,7 @@
     CHECK_NOTHROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
     CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
     CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerC"));
-    CHECK_NOTHROW(graph.AddLayer<armnn::AdditionLayer>("layerD"));
+    CHECK_NOTHROW(graph.AddLayer<armnn::ElementwiseBinaryLayer>(armnn::BinaryOperation::Add, "layerD"));
     CHECK_NOTHROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
 
     armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA");
@@ -168,7 +168,7 @@
     CHECK_NOTHROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
     CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
     CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerC"));
-    CHECK_NOTHROW(graph.AddLayer<armnn::AdditionLayer>("layerD"));
+    CHECK_NOTHROW(graph.AddLayer<armnn::ElementwiseBinaryLayer>(armnn::BinaryOperation::Add, "layerD"));
     CHECK_NOTHROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
 
     armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA");
@@ -548,7 +548,7 @@
     armnn::SplitterLayer* const splitterLayer = graph.AddLayer<armnn::SplitterLayer>(splitterDesc, "splitter");
     splitterLayer->SetBackendId(armnn::Compute::GpuAcc);
 
-    armnn::AdditionLayer* const additionLayer = graph.AddLayer<armnn::AdditionLayer>("addition");
+    auto* const additionLayer = graph.AddLayer<armnn::ElementwiseBinaryLayer>(armnn::BinaryOperation::Add, "addition");
     additionLayer->SetBackendId(armnn::Compute::CpuRef);
 
     armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "output");
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index 058f079..0bfad4d 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -32,7 +32,7 @@
 {
     armnn::NetworkImpl net;
     LayerGuid inputId = net.AddInputLayer(0)->GetGuid();
-    LayerGuid addId = net.AddAdditionLayer()->GetGuid();
+    LayerGuid addId = net.AddElementwiseBinaryLayer(armnn::BinaryOperation::Add)->GetGuid();
     LayerGuid outputId = net.AddOutputLayer(0)->GetGuid();
 
     CHECK(inputId != addId);
@@ -50,7 +50,7 @@
 {
     armnn::INetworkPtr inet(armnn::INetwork::Create());
     inet->AddInputLayer(0);
-    inet->AddAdditionLayer();
+    inet->AddElementwiseBinaryLayer(armnn::BinaryOperation::Add);
     inet->AddActivationLayer(armnn::ActivationDescriptor());
     inet->AddOutputLayer(0);
 }
@@ -59,7 +59,7 @@
 {
     armnn::NetworkImpl net;
     net.AddInputLayer(0);
-    net.AddAdditionLayer();
+    net.AddElementwiseBinaryLayer(armnn::BinaryOperation::Add);
     net.AddActivationLayer(armnn::ActivationDescriptor());
     net.AddOutputLayer(0);
 }
@@ -136,13 +136,15 @@
 
     softmaxLayer->GetOutputSlot(0).Connect(batchNormalizationLayer->GetInputSlot(0));
 
-    armnn::IConnectableLayer* const additionLayer = net.AddAdditionLayer("addition");
+    armnn::IConnectableLayer* const additionLayer = net.AddElementwiseBinaryLayer(armnn::BinaryOperation::Add,
+                                                                                  "addition");
     CHECK(additionLayer);
 
     batchNormalizationLayer->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0));
     batchNormalizationLayer->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(1));
 
-    armnn::IConnectableLayer* const multiplicationLayer = net.AddMultiplicationLayer("multiplication");
+    armnn::IConnectableLayer* const multiplicationLayer = net.AddElementwiseBinaryLayer(armnn::BinaryOperation::Mul,
+                                                                                        "multiplication");
     CHECK(multiplicationLayer);
 
     additionLayer->GetOutputSlot(0).Connect(multiplicationLayer->GetInputSlot(0));
@@ -338,7 +340,7 @@
     splitterLayer->GetOutputSlot(1).Connect(softmax2Layer->GetInputSlot(0));
 
     // Adds addition layer.
-    layer = net.AddAdditionLayer("add layer");
+    layer = net.AddElementwiseBinaryLayer(armnn::BinaryOperation::Add, "add layer");
     CHECK(layer);
 
     softmax1Layer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
@@ -382,7 +384,7 @@
     splitterLayer->GetOutputSlot(1).Connect(softmax2Layer->GetInputSlot(0));
 
     // Adds multiplication layer.
-    layer = net.AddMultiplicationLayer("multiplication layer");
+    layer = net.AddElementwiseBinaryLayer(armnn::BinaryOperation::Mul, "multiplication layer");
     CHECK(layer);
 
     softmax1Layer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
@@ -634,15 +636,27 @@
     CHECK(originalDescriptor.m_DataLayout == armnn::DataLayout::NCHW);
 }
 
-TEST_CASE("CheckNullDescriptor")
+TEST_CASE("CheckNotNullDescriptor")
 {
     armnn::NetworkImpl net;
-    armnn::IConnectableLayer* const addLayer = net.AddAdditionLayer();
+    armnn::IConnectableLayer* const addLayer = net.AddElementwiseBinaryLayer(armnn::BinaryOperation::Add);
 
     CHECK(addLayer);
 
     const armnn::BaseDescriptor& descriptor = addLayer->GetParameters();
-    // additional layer has no descriptor so a NullDescriptor will be returned
+    // ElementwiseBinary layer has a descriptor so a NullDescriptor will not be returned
+    CHECK(descriptor.IsNull() == false);
+}
+
+TEST_CASE("CheckNullDescriptor")
+{
+    armnn::NetworkImpl net;
+    armnn::IConnectableLayer* const preluLayer = net.AddPreluLayer();
+
+    CHECK(preluLayer);
+
+    const armnn::BaseDescriptor& descriptor = preluLayer->GetParameters();
+    // Prelu has no descriptor so a NullDescriptor will be returned
     CHECK(descriptor.IsNull() == true);
 }
 
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
index cfdaaf5..b8607d1 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
@@ -1,12 +1,10 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "TestNameAndDescriptorLayerVisitor.hpp"
 #include "Network.hpp"
 
-#include <armnn/Exceptions.hpp>
-
 #include <doctest/doctest.h>
 
 namespace
@@ -84,6 +82,12 @@
 }
 
 template<>
+armnn::ElementwiseBinaryDescriptor GetDescriptor<armnn::ElementwiseBinaryDescriptor>()
+{
+    return armnn::ElementwiseBinaryDescriptor(armnn::BinaryOperation::Add);
+}
+
+template<>
 armnn::ElementwiseUnaryDescriptor GetDescriptor<armnn::ElementwiseUnaryDescriptor>()
 {
     return armnn::ElementwiseUnaryDescriptor(armnn::UnaryOperation::Abs);
@@ -273,12 +277,14 @@
 
 TEST_SUITE("TestNameAndDescriptorLayerVisitor")
 {
-TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Activation, CheckAdditionLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Activation, CheckActivationLayerVisitorNameAndDescriptor)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(ArgMinMax, CheckArgMinMaxLayerVisitorNameAndDescriptor)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(DepthToSpace, CheckDepthToSpaceLayerVisitorNameAndDescriptor)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(BatchToSpaceNd, CheckBatchToSpaceNdLayerVisitorNameAndDescriptor)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Comparison, CheckComparisonLayerVisitorNameAndDescriptor)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Concat, CheckConcatLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(ElementwiseBinary,
+                                                  CheckElementwiseBinaryLayerVisitorNameAndDescriptor)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(ElementwiseUnary, CheckElementwiseUnaryLayerVisitorNameAndDescriptor)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Fill, CheckFillLayerVisitorNameAndDescriptor)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Gather, CheckGatherLayerVisitorNameAndDescriptor)
@@ -304,7 +310,7 @@
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Transpose, CheckTransposeLayerVisitorNameAndDescriptor)
 
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Activation,
-    CheckAdditionLayerVisitorNameNullptrAndDescriptor)
+    CheckActivationLayerVisitorNameNullptrAndDescriptor)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(ArgMinMax,
     CheckArgMinMaxLayerVisitorNameNullptrAndDescriptor)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(DepthToSpace,
@@ -315,6 +321,8 @@
     CheckComparisonLayerVisitorNameNullptrAndDescriptor)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Concat,
     CheckConcatLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(ElementwiseBinary,
+    CheckElementwiseBinaryLayerVisitorNameNullptrAndDescriptor)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(ElementwiseUnary,
     CheckElementwiseUnaryLayerVisitorNameNullptrAndDescriptor)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Fill,
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
index b1f9512..9889035 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -63,6 +63,7 @@
 DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Comparison)
 DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Concat)
 DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(DepthToSpace)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(ElementwiseBinary)
 DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(ElementwiseUnary)
 DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Fill)
 DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Gather)
diff --git a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
index 0636a00..59dfb86 100644
--- a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
+++ b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -27,7 +27,7 @@
 
     auto input0 = graph.AddLayer<InputLayer>(0, "input0");
     auto input1 = graph.AddLayer<InputLayer>(1, "input1");
-    auto add = graph.AddLayer<AdditionLayer>("add");
+    auto add = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "add");
     auto output = graph.AddLayer<OutputLayer>(0, "output");
     input0->GetOutputSlot().SetTensorInfo(info0);
     input1->GetOutputSlot().SetTensorInfo(info1);
@@ -40,7 +40,7 @@
     CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<InputLayer>,
-                             &IsLayerOfType<AdditionLayer>,
+                             &IsLayerOfType<ElementwiseBinaryLayer>,
                              &IsLayerOfType<OutputLayer>));
 
     // Run optimizer
@@ -48,19 +48,19 @@
 
     // Broadcast reshape layer has been added to the graph correctly
     CHECK(CheckSequence(graph.cbegin(), graph.cend(),
-                             &IsLayerOfType<InputLayer>,
-                             &IsLayerOfType<InputLayer>,
-                             &IsLayerOfType<ReshapeLayer>,
-                             &IsLayerOfType<AdditionLayer>,
-                             &IsLayerOfType<OutputLayer>));
+                        &IsLayerOfType<InputLayer>,
+                        &IsLayerOfType<InputLayer>,
+                        &IsLayerOfType<ReshapeLayer>,
+                        &IsLayerOfType<ElementwiseBinaryLayer>,
+                        &IsLayerOfType<OutputLayer>));
 
     Layer* const reshapeLayer = GetFirstLayerWithName(graph, reshapeLayerName);
     CHECK(reshapeLayer);
     auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
 
     // Tensorshape and the data type are correct
-    CHECK((addedReshapeTensorInfo.GetShape() == expectedReshapeShape));
-    CHECK((addedReshapeTensorInfo.GetDataType() == expectedDataType));
+    CHECK_EQ(addedReshapeTensorInfo.GetShape(), expectedReshapeShape);
+    CHECK_EQ(addedReshapeTensorInfo.GetDataType(), expectedDataType);
 }
 
 TEST_CASE("AddBroadcastReshapeLayerSimpleTest")
@@ -121,7 +121,7 @@
 
     auto input0 = graph.AddLayer<InputLayer>(0, "input0");
     auto input1 = graph.AddLayer<InputLayer>(1, "input1");
-    auto sub = graph.AddLayer<SubtractionLayer>("sub");
+    auto sub = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Sub, "sub");
     auto output = graph.AddLayer<OutputLayer>(0, "output");
     input0->GetOutputSlot().SetTensorInfo(info0);
     input1->GetOutputSlot().SetTensorInfo(info1);
@@ -134,7 +134,7 @@
     CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<InputLayer>,
-                             &IsLayerOfType<SubtractionLayer>,
+                             &IsLayerOfType<ElementwiseBinaryLayer>,
                              &IsLayerOfType<OutputLayer>));
 
     // Run optimizer
@@ -145,7 +145,7 @@
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<ReshapeLayer>,
-                             &IsLayerOfType<SubtractionLayer>,
+                             &IsLayerOfType<ElementwiseBinaryLayer>,
                              &IsLayerOfType<OutputLayer>));
 
     Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:sub-0");
@@ -153,8 +153,8 @@
     auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
 
     // Tensorshape and the data type are correct
-    CHECK((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 1, 5 })));
-    CHECK((addedReshapeTensorInfo.GetDataType() == DataType::Float32));
+    CHECK_EQ(addedReshapeTensorInfo.GetShape(), TensorShape({ 1, 1, 1, 5 }));
+    CHECK_EQ(addedReshapeTensorInfo.GetDataType(), DataType::Float32);
 }
 
 TEST_CASE("AddBroadcastReshapeLayerDivisionTest")
@@ -166,7 +166,7 @@
 
     auto input0 = graph.AddLayer<InputLayer>(0, "input0");
     auto input1 = graph.AddLayer<InputLayer>(1, "input1");
-    auto div = graph.AddLayer<DivisionLayer>("div");
+    auto div = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Div, "div");
     auto output = graph.AddLayer<OutputLayer>(0, "output");
     input0->GetOutputSlot().SetTensorInfo(info0);
     input1->GetOutputSlot().SetTensorInfo(info1);
@@ -179,7 +179,7 @@
     CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<InputLayer>,
-                             &IsLayerOfType<DivisionLayer>,
+                             &IsLayerOfType<ElementwiseBinaryLayer>,
                              &IsLayerOfType<OutputLayer>));
 
     // Run optimizer
@@ -190,7 +190,7 @@
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<ReshapeLayer>,
-                             &IsLayerOfType<DivisionLayer>,
+                             &IsLayerOfType<ElementwiseBinaryLayer>,
                              &IsLayerOfType<OutputLayer>));
 
     Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:div-0");
@@ -198,8 +198,8 @@
     auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
 
     // Tensorshape and the data type are correct
-    CHECK((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 4, 5 })));
-    CHECK((addedReshapeTensorInfo.GetDataType() == DataType::QAsymmS8));
+    CHECK_EQ(addedReshapeTensorInfo.GetShape(), TensorShape({ 1, 1, 4, 5 }));
+    CHECK_EQ(addedReshapeTensorInfo.GetDataType(), DataType::QAsymmS8);
 }
 
 TEST_CASE("AddBroadcastReshapeLayerMultiplicationTest")
@@ -211,7 +211,7 @@
 
     auto input0 = graph.AddLayer<InputLayer>(0, "input0");
     auto input1 = graph.AddLayer<InputLayer>(1, "input1");
-    auto mul = graph.AddLayer<MultiplicationLayer>("mul");
+    auto mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
     auto output = graph.AddLayer<OutputLayer>(0, "output");
     input0->GetOutputSlot().SetTensorInfo(info0);
     input1->GetOutputSlot().SetTensorInfo(info1);
@@ -224,7 +224,7 @@
     CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<InputLayer>,
-                             &IsLayerOfType<MultiplicationLayer>,
+                             &IsLayerOfType<ElementwiseBinaryLayer>,
                              &IsLayerOfType<OutputLayer>));
 
     // Run optimizer
@@ -235,7 +235,7 @@
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<ReshapeLayer>,
-                             &IsLayerOfType<MultiplicationLayer>,
+                             &IsLayerOfType<ElementwiseBinaryLayer>,
                              &IsLayerOfType<OutputLayer>));
 
     Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:mul-0");
@@ -243,8 +243,8 @@
     auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
 
     // Tensorshape and the data type are correct
-    CHECK((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 3, 5 })));
-    CHECK((addedReshapeTensorInfo.GetDataType() == DataType::QAsymmU8));
+    CHECK_EQ(addedReshapeTensorInfo.GetShape(), TensorShape({ 1, 1, 3, 5 }));
+    CHECK_EQ(addedReshapeTensorInfo.GetDataType(), DataType::QAsymmU8);
 }
 
 TEST_CASE("AddNoBroadcastReshapeLayerTest")
@@ -256,7 +256,7 @@
 
     auto input0 = graph.AddLayer<InputLayer>(0, "input0");
     auto input1 = graph.AddLayer<InputLayer>(1, "input1");
-    auto mul = graph.AddLayer<MultiplicationLayer>("mul");
+    auto mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
     auto output = graph.AddLayer<OutputLayer>(0, "output");
     input0->GetOutputSlot().SetTensorInfo(info0);
     input1->GetOutputSlot().SetTensorInfo(info1);
@@ -269,7 +269,7 @@
     CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<InputLayer>,
-                             &IsLayerOfType<MultiplicationLayer>,
+                             &IsLayerOfType<ElementwiseBinaryLayer>,
                              &IsLayerOfType<OutputLayer>));
 
     // Run optimizer
@@ -279,7 +279,7 @@
     CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<InputLayer>,
-                             &IsLayerOfType<MultiplicationLayer>,
+                             &IsLayerOfType<ElementwiseBinaryLayer>,
                              &IsLayerOfType<OutputLayer>));
 
     Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:mul-0");
@@ -295,7 +295,7 @@
 
     auto input = graph.AddLayer<InputLayer>(0, "input");
     auto constant = graph.AddLayer<ConstantLayer>("constant");
-    auto mul = graph.AddLayer<MultiplicationLayer>("mul");
+    auto mul = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul, "mul");
     auto output = graph.AddLayer<OutputLayer>(0, "output");
 
     uint8_t tensor[] = { 1, 1, 1, 1, 1 };
@@ -313,7 +313,7 @@
     CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<ConstantLayer>,
-                             &IsLayerOfType<MultiplicationLayer>,
+                             &IsLayerOfType<ElementwiseBinaryLayer>,
                              &IsLayerOfType<OutputLayer>));
 
     // Run optimizer
@@ -323,7 +323,7 @@
     CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<ConstantLayer>,
-                             &IsLayerOfType<MultiplicationLayer>,
+                             &IsLayerOfType<ElementwiseBinaryLayer>,
                              &IsLayerOfType<OutputLayer>));
 
     TensorShape expectedShape = TensorShape{ 1, 1, 1, 5 };
@@ -351,8 +351,8 @@
 
     auto input = graph.AddLayer<InputLayer>(0, "input");
     auto constant = graph.AddLayer<ConstantLayer>("constant");
-    auto add1 = graph.AddLayer<AdditionLayer>("add1");
-    auto add2 = graph.AddLayer<AdditionLayer>("add2");
+    auto add1 = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "add1");
+    auto add2 = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "add2");
     auto output = graph.AddLayer<OutputLayer>(0, "output");
 
     input->GetOutputSlot().SetTensorInfo(inputInfo);
@@ -371,8 +371,8 @@
     CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<ConstantLayer>,
-                             &IsLayerOfType<AdditionLayer>,
-                             &IsLayerOfType<AdditionLayer>,
+                             &IsLayerOfType<ElementwiseBinaryLayer>,
+                             &IsLayerOfType<ElementwiseBinaryLayer>,
                              &IsLayerOfType<OutputLayer>));
 
     // Run optimizer
@@ -384,8 +384,8 @@
                              &IsLayerOfType<ConstantLayer>,
                              &IsLayerOfType<ReshapeLayer>,
                              &IsLayerOfType<ReshapeLayer>,
-                             &IsLayerOfType<AdditionLayer>,
-                             &IsLayerOfType<AdditionLayer>,
+                             &IsLayerOfType<ElementwiseBinaryLayer>,
+                             &IsLayerOfType<ElementwiseBinaryLayer>,
                              &IsLayerOfType<OutputLayer>));
 
     // Ensure the output shape of the constant hasn't changed.
diff --git a/src/armnn/test/optimizations/MovePermuteUpTests.cpp b/src/armnn/test/optimizations/MovePermuteUpTests.cpp
index 152e799..018286c 100644
--- a/src/armnn/test/optimizations/MovePermuteUpTests.cpp
+++ b/src/armnn/test/optimizations/MovePermuteUpTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -36,7 +36,7 @@
     head = graph.InsertNewLayer<armnn::ActivationLayer>(head->GetInputSlot(0), armnn::ActivationDescriptor{}, "");
     head->GetOutputHandler().SetTensorInfo(info);
 
-    head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
+    head = graph.InsertNewLayer<armnn::ElementwiseBinaryLayer>(head->GetInputSlot(0), armnn::BinaryOperation::Add, "");
     head->GetOutputHandler().SetTensorInfo(info);
 
     // Inserts input for 2nd input of Addition.
@@ -54,7 +54,7 @@
     head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), "");
     head->GetOutputHandler().SetTensorInfo(info);
 
-    head = graph.InsertNewLayer<armnn::MultiplicationLayer>(head->GetInputSlot(0), "");
+    head = graph.InsertNewLayer<armnn::ElementwiseBinaryLayer>(head->GetInputSlot(0), armnn::BinaryOperation::Mul, "");
     head->GetOutputHandler().SetTensorInfo(info);
 
     // Inserts input for 2nd input of Multiplication.
@@ -69,9 +69,9 @@
 
     CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::MultiplicationLayer>, &IsLayerOfType<armnn::MemCopyLayer>,
+                             &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::MemCopyLayer>,
                              &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::FakeQuantizationLayer>,
-                             &IsLayerOfType<armnn::AdditionLayer>, &IsLayerOfType<armnn::ActivationLayer>,
+                             &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::ActivationLayer>,
                              &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::OutputLayer>));
 
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(MovePermuteUp()));
@@ -80,10 +80,11 @@
     CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::PermuteLayer>,
-                             &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::MultiplicationLayer>,
+                             &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::ElementwiseBinaryLayer>,
                              &IsLayerOfType<armnn::MemCopyLayer>, &IsLayerOfType<armnn::FloorLayer>,
-                             &IsLayerOfType<armnn::FakeQuantizationLayer>, &IsLayerOfType<armnn::AdditionLayer>,
-                             &IsLayerOfType<armnn::ActivationLayer>, &IsLayerOfType<armnn::OutputLayer>));
+                             &IsLayerOfType<armnn::FakeQuantizationLayer>,
+                             &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::ActivationLayer>,
+                             &IsLayerOfType<armnn::OutputLayer>));
 
     std::list<std::string> testRelatedLayers = { permuteLayerName };
 
diff --git a/src/armnn/test/optimizations/MoveTransposeUpTests.cpp b/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
index 09bf9ae..6a6010c 100644
--- a/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
+++ b/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -37,7 +37,7 @@
     head = graph.InsertNewLayer<armnn::ActivationLayer>(head->GetInputSlot(0), armnn::ActivationDescriptor{}, "");
     head->GetOutputHandler().SetTensorInfo(info);
 
-    head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
+    head = graph.InsertNewLayer<armnn::ElementwiseBinaryLayer>(head->GetInputSlot(0), armnn::BinaryOperation::Add, "");
     head->GetOutputHandler().SetTensorInfo(info);
 
     // Inserts input for 2nd input of Addition.
@@ -55,7 +55,7 @@
     head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), "");
     head->GetOutputHandler().SetTensorInfo(info);
 
-    head = graph.InsertNewLayer<armnn::MultiplicationLayer>(head->GetInputSlot(0), "");
+    head = graph.InsertNewLayer<armnn::ElementwiseBinaryLayer>(head->GetInputSlot(0), armnn::BinaryOperation::Mul, "");
     head->GetOutputHandler().SetTensorInfo(info);
 
     // Inserts input for 2nd input of Multiplication.
@@ -70,9 +70,9 @@
 
     CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::MultiplicationLayer>, &IsLayerOfType<armnn::MemCopyLayer>,
+                             &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::MemCopyLayer>,
                              &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::FakeQuantizationLayer>,
-                             &IsLayerOfType<armnn::AdditionLayer>, &IsLayerOfType<armnn::ActivationLayer>,
+                             &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::ActivationLayer>,
                              &IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::OutputLayer>));
 
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(MoveTransposeUp()));
@@ -81,10 +81,11 @@
     CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::TransposeLayer>,
-                             &IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::MultiplicationLayer>,
+                             &IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::ElementwiseBinaryLayer>,
                              &IsLayerOfType<armnn::MemCopyLayer>, &IsLayerOfType<armnn::FloorLayer>,
-                             &IsLayerOfType<armnn::FakeQuantizationLayer>, &IsLayerOfType<armnn::AdditionLayer>,
-                             &IsLayerOfType<armnn::ActivationLayer>, &IsLayerOfType<armnn::OutputLayer>));
+                             &IsLayerOfType<armnn::FakeQuantizationLayer>,
+                             &IsLayerOfType<armnn::ElementwiseBinaryLayer>, &IsLayerOfType<armnn::ActivationLayer>,
+                             &IsLayerOfType<armnn::OutputLayer>));
 
     std::list<std::string> testRelatedLayers = { transposeLayerName };
 
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index 702b060..ed92188 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -229,6 +229,7 @@
     m_ParserFunctions[Layer_DequantizeLayer]             = &DeserializerImpl::ParseDequantize;
     m_ParserFunctions[Layer_DetectionPostProcessLayer]   = &DeserializerImpl::ParseDetectionPostProcess;
     m_ParserFunctions[Layer_DivisionLayer]               = &DeserializerImpl::ParseDivision;
+    m_ParserFunctions[Layer_ElementwiseBinaryLayer]      = &DeserializerImpl::ParseElementwiseBinary;
     m_ParserFunctions[Layer_ElementwiseUnaryLayer]       = &DeserializerImpl::ParseElementwiseUnary;
     m_ParserFunctions[Layer_EqualLayer]                  = &DeserializerImpl::ParseEqual;
     m_ParserFunctions[Layer_FullyConnectedLayer]         = &DeserializerImpl::ParseFullyConnected;
@@ -325,6 +326,8 @@
             return graphPtr->layers()->Get(layerIndex)->layer_as_DivisionLayer()->base();
         case Layer::Layer_EqualLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_EqualLayer()->base();
+        case Layer::Layer_ElementwiseBinaryLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_ElementwiseBinaryLayer()->base();
         case Layer::Layer_ElementwiseUnaryLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_ElementwiseUnaryLayer()->base();
         case Layer::Layer_FullyConnectedLayer:
@@ -562,7 +565,28 @@
     }
 }
 
-armnn::UnaryOperation ToUnaryOperation(armnnSerializer::UnaryOperation operation)
+armnn::BinaryOperation ToElementwiseBinaryOperation(armnnSerializer::BinaryOperation operation)
+{
+    switch (operation)
+    {
+        case armnnSerializer::BinaryOperation::BinaryOperation_Add:
+            return armnn::BinaryOperation::Add;
+        case armnnSerializer::BinaryOperation::BinaryOperation_Div:
+            return armnn::BinaryOperation::Div;
+        case armnnSerializer::BinaryOperation::BinaryOperation_Maximum:
+            return armnn::BinaryOperation::Maximum;
+        case armnnSerializer::BinaryOperation::BinaryOperation_Minimum:
+            return armnn::BinaryOperation::Minimum;
+        case armnnSerializer::BinaryOperation::BinaryOperation_Mul:
+            return armnn::BinaryOperation::Mul;
+        case armnnSerializer::BinaryOperation::BinaryOperation_Sub:
+            return armnn::BinaryOperation::Sub;
+        default:
+            throw armnn::InvalidArgumentException("Binary operation unknown");
+    }
+}
+
+armnn::UnaryOperation ToElementwiseUnaryOperation(armnnSerializer::UnaryOperation operation)
 {
     switch (operation)
     {
@@ -1226,7 +1250,8 @@
     CHECK_VALID_SIZE(outputs.size(), 1);
 
     auto layerName = GetLayerName(graph, layerIndex);
-    IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
+    armnn::ElementwiseBinaryDescriptor descriptor(armnn::BinaryOperation::Add);
+    IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(descriptor, layerName.c_str());
 
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -1745,7 +1770,8 @@
     CHECK_VALID_SIZE(outputs.size(), 1);
 
     auto layerName = GetLayerName(graph, layerIndex);
-    IConnectableLayer* layer = m_Network->AddDivisionLayer(layerName.c_str());
+    armnn::ElementwiseBinaryDescriptor descriptor(armnn::BinaryOperation::Div);
+    IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(descriptor, layerName.c_str());
 
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -1935,7 +1961,8 @@
     CHECK_VALID_SIZE(outputs.size(), 1);
 
     auto layerName = GetLayerName(graph, layerIndex);
-    IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
+    armnn::ElementwiseBinaryDescriptor descriptor(armnn::BinaryOperation::Minimum);
+    IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(descriptor, layerName.c_str());
 
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -1955,7 +1982,8 @@
     CHECK_VALID_SIZE(outputs.size(), 1);
 
     auto layerName = GetLayerName(graph, layerIndex);
-    IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
+    armnn::ElementwiseBinaryDescriptor descriptor(armnn::BinaryOperation::Maximum);
+    IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(descriptor, layerName.c_str());
 
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -2030,6 +2058,33 @@
     RegisterOutputSlots(graph, layerIndex, layer);
 }
 
+void IDeserializer::DeserializerImpl::ParseElementwiseBinary(GraphPtr graph, unsigned int layerIndex)
+{
+    CHECK_LAYERS(graph, 0, layerIndex);
+    CHECK_LOCATION();
+
+    auto inputs = GetInputs(graph, layerIndex);
+    CHECK_VALID_SIZE(inputs.size(), 2);
+
+    auto outputs = GetOutputs(graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    auto fbLayer      = graph->layers()->Get(layerIndex)->layer_as_ElementwiseBinaryLayer();
+    auto fbDescriptor = fbLayer->descriptor();
+
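+    // Convert the serialized BinaryOperation back into the armnn::BinaryOperation carried by the descriptor.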
+    armnn::ElementwiseBinaryDescriptor descriptor;
+    descriptor.m_Operation = ToElementwiseBinaryOperation(fbDescriptor->operation());
+
+    const std::string& layerName = GetLayerName(graph, layerIndex);
+    IConnectableLayer* layer     = m_Network->AddElementwiseBinaryLayer(descriptor, layerName.c_str());
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    RegisterInputSlots(graph, layerIndex, layer);
+    RegisterOutputSlots(graph, layerIndex, layer);
+}
+
 void IDeserializer::DeserializerImpl::ParseElementwiseUnary(GraphPtr graph, unsigned int layerIndex)
 {
     CHECK_LAYERS(graph, 0, layerIndex);
@@ -2045,7 +2100,7 @@
     auto fbDescriptor = fbLayer->descriptor();
 
     armnn::ElementwiseUnaryDescriptor descriptor;
-    descriptor.m_Operation = ToUnaryOperation(fbDescriptor->operation());
+    descriptor.m_Operation = ToElementwiseUnaryOperation(fbDescriptor->operation());
 
     const std::string& layerName = GetLayerName(graph, layerIndex);
     IConnectableLayer* layer     = m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str());
@@ -2106,7 +2161,8 @@
     CHECK_VALID_SIZE(outputs.size(), 1);
 
     auto layerName = GetLayerName(graph, layerIndex);
-    IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
+    armnn::ElementwiseBinaryDescriptor descriptor(armnn::BinaryOperation::Mul);
+    IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(descriptor, layerName.c_str());
 
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -3023,7 +3079,8 @@
     CHECK_VALID_SIZE(outputs.size(), 1);
 
     auto layerName = GetLayerName(graph, layerIndex);
-    IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
+    armnn::ElementwiseBinaryDescriptor descriptor(armnn::BinaryOperation::Sub);
+    IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(descriptor, layerName.c_str());
 
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp
index bd01a35..b75c68d 100644
--- a/src/armnnDeserializer/Deserializer.hpp
+++ b/src/armnnDeserializer/Deserializer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -103,6 +103,7 @@
     void ParseDequantize(GraphPtr graph, unsigned int layerIndex);
     void ParseDetectionPostProcess(GraphPtr graph, unsigned int layerIndex);
     void ParseDivision(GraphPtr graph, unsigned int layerIndex);
+    void ParseElementwiseBinary(GraphPtr graph, unsigned int layerIndex);
     void ParseElementwiseUnary(GraphPtr graph, unsigned int layerIndex);
     void ParseEqual(GraphPtr graph, unsigned int layerIndex);
     void ParseFill(GraphPtr graph, unsigned int layerIndex);
diff --git a/src/armnnOnnxParser/OnnxParser.cpp b/src/armnnOnnxParser/OnnxParser.cpp
index 936216f..26e2cee 100644
--- a/src/armnnOnnxParser/OnnxParser.cpp
+++ b/src/armnnOnnxParser/OnnxParser.cpp
@@ -1558,7 +1558,7 @@
     }
 
 
-    IConnectableLayer* layer = m_Network->AddAdditionLayer(node.name().c_str());
+    IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Add, node.name().c_str());
     ARMNN_ASSERT(layer != nullptr);
 
     auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index 2dbfd85..fb3bc01 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -183,6 +183,7 @@
     Pooling3d = 66,
     GatherNd = 67,
     BatchMatMul = 68,
+    ElementwiseBinary = 69,
 }
 
 // Base layer table to be used as part of other layers
@@ -324,6 +325,24 @@
     base:LayerBase;
 }
 
+enum BinaryOperation : byte {
+    Add     = 0,
+    Div     = 1,
+    Maximum = 2,
+    Minimum = 3,
+    Mul     = 4,
+    Sub     = 5
+}
+
+table ElementwiseBinaryDescriptor {
+    operation:BinaryOperation;
+}
+
+table ElementwiseBinaryLayer {
+    base:LayerBase;
+    descriptor:ElementwiseBinaryDescriptor;
+}
+
 enum UnaryOperation : byte {
     Abs = 0,
     Rsqrt = 1,
@@ -1094,6 +1113,7 @@
     Pooling3dLayer,
     GatherNdLayer,
     BatchMatMulLayer,
+    ElementwiseBinaryLayer,
 }
 
 table AnyLayer {
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index c9a3022..5a095d9 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "Serializer.hpp"
@@ -533,6 +533,21 @@
     CreateAnyLayer(fbDivisionLayer.o, serializer::Layer::Layer_DivisionLayer);
 }
 
+void SerializerStrategy::SerializeElementwiseBinaryLayer(const armnn::IConnectableLayer* layer,
+                                                         const armnn::ElementwiseBinaryDescriptor& descriptor,
+                                                         const char* name)
+{
+    IgnoreUnused(name);
+
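+    // Serialize the layer base and a descriptor carrying the binary operation, then add it to the graph via CreateAnyLayer.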
+    auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_ElementwiseBinary);
+    auto fbDescriptor = serializer::CreateElementwiseBinaryDescriptor(
+            m_flatBufferBuilder,
+            GetFlatBufferBinaryOperation(descriptor.m_Operation));
+
+    auto fbLayer = serializer::CreateElementwiseBinaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
+    CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ElementwiseBinaryLayer);
+}
+
 void SerializerStrategy::SerializeElementwiseUnaryLayer(const armnn::IConnectableLayer* layer,
                                                    const armnn::ElementwiseUnaryDescriptor& descriptor,
                                                    const char* name)
@@ -2119,6 +2134,13 @@
             SerializeDivisionLayer(layer, name);
             break;
         }
+        case armnn::LayerType::ElementwiseBinary :
+        {
+            const armnn::ElementwiseBinaryDescriptor& layerDescriptor =
+                    static_cast<const armnn::ElementwiseBinaryDescriptor&>(descriptor);
+            SerializeElementwiseBinaryLayer(layer, layerDescriptor, name);
+            break;
+        }
         case armnn::LayerType::ElementwiseUnary :
         {
             const armnn::ElementwiseUnaryDescriptor& layerDescriptor =
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 60fed4f..4d5e806 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -172,6 +172,10 @@
     void SerializeDivisionLayer(const armnn::IConnectableLayer* layer,
                                 const char* name = nullptr);
 
+    void SerializeElementwiseBinaryLayer(const armnn::IConnectableLayer* layer,
+                                         const armnn::ElementwiseBinaryDescriptor& descriptor,
+                                         const char* name = nullptr);
+
     void SerializeElementwiseUnaryLayer(const armnn::IConnectableLayer* layer,
                                         const armnn::ElementwiseUnaryDescriptor& descriptor,
                                         const char* name = nullptr);
diff --git a/src/armnnSerializer/SerializerUtils.cpp b/src/armnnSerializer/SerializerUtils.cpp
index 49ce721..703f56f 100644
--- a/src/armnnSerializer/SerializerUtils.cpp
+++ b/src/armnnSerializer/SerializerUtils.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -107,9 +107,30 @@
     }
 }
 
-armnnSerializer::UnaryOperation GetFlatBufferUnaryOperation(armnn::UnaryOperation comparisonOperation)
+armnnSerializer::BinaryOperation GetFlatBufferBinaryOperation(armnn::BinaryOperation binaryOperation)
 {
-    switch (comparisonOperation)
+    switch (binaryOperation)
+    {
+        case armnn::BinaryOperation::Add:
+            return armnnSerializer::BinaryOperation::BinaryOperation_Add;
+        case armnn::BinaryOperation::Div:
+            return armnnSerializer::BinaryOperation::BinaryOperation_Div;
+        case armnn::BinaryOperation::Maximum:
+            return armnnSerializer::BinaryOperation::BinaryOperation_Maximum;
+        case armnn::BinaryOperation::Minimum:
+            return armnnSerializer::BinaryOperation::BinaryOperation_Minimum;
+        case armnn::BinaryOperation::Mul:
+            return armnnSerializer::BinaryOperation::BinaryOperation_Mul;
+        case armnn::BinaryOperation::Sub:
+            return armnnSerializer::BinaryOperation::BinaryOperation_Sub;
+        default:
+            throw armnn::InvalidArgumentException("Elementwise Binary operation unknown");
+    }
+}
+
+armnnSerializer::UnaryOperation GetFlatBufferUnaryOperation(armnn::UnaryOperation unaryOperation)
+{
+    switch (unaryOperation)
     {
         case armnn::UnaryOperation::Abs:
             return armnnSerializer::UnaryOperation::UnaryOperation_Abs;
@@ -128,7 +149,7 @@
         case armnn::UnaryOperation::Sin:
             return armnnSerializer::UnaryOperation::UnaryOperation_Sin;
         default:
-            throw armnn::InvalidArgumentException("Unary operation unknown");
+            throw armnn::InvalidArgumentException("Elementwise Unary operation unknown");
     }
 }
 
diff --git a/src/armnnSerializer/SerializerUtils.hpp b/src/armnnSerializer/SerializerUtils.hpp
index 07cdc2a..628385e 100644
--- a/src/armnnSerializer/SerializerUtils.hpp
+++ b/src/armnnSerializer/SerializerUtils.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -18,6 +18,8 @@
 
 armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout);
 
+armnnSerializer::BinaryOperation GetFlatBufferBinaryOperation(armnn::BinaryOperation binaryOperation);
+
 armnnSerializer::UnaryOperation GetFlatBufferUnaryOperation(armnn::UnaryOperation unaryOperation);
 
 armnnSerializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(armnn::PoolingAlgorithm poolingAlgorithm);
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 3573a81..6ddc971 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -331,6 +331,7 @@
                 case armnn::LayerType::Input: break;
                 case armnn::LayerType::Output: break;
                 case armnn::LayerType::Addition: break;
+                case armnn::LayerType::ElementwiseBinary: break;
                 default:
                 {
                     this->VerifyNameAndConnections(layer, name);
@@ -972,6 +973,47 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
+void SerializeElementwiseBinaryTest(armnn::BinaryOperation binaryOperation)
+{
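+    // Round-trips a single ElementwiseBinary layer through the serializer and checks name, connections and descriptor.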
+    auto layerName = GetBinaryOperationAsCString(binaryOperation);
+    const armnn::TensorInfo tensorInfo({ 1, 5, 2, 3 }, armnn::DataType::Float32);
+    armnn::ElementwiseBinaryDescriptor descriptor(binaryOperation);
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1);
+    armnn::IConnectableLayer* const elementwiseBinaryLayer = network->AddElementwiseBinaryLayer(descriptor,
+                                                                                                layerName);
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
+
+    inputLayer0->GetOutputSlot(0).Connect(elementwiseBinaryLayer->GetInputSlot(0));
+    inputLayer1->GetOutputSlot(0).Connect(elementwiseBinaryLayer->GetInputSlot(1));
+    elementwiseBinaryLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+    inputLayer0->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+    inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+    elementwiseBinaryLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    std::string serializedNetwork = SerializeNetwork(*network);
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(serializedNetwork);
+    CHECK(deserializedNetwork);
+
+    LayerVerifierBaseWithDescriptor<armnn::ElementwiseBinaryDescriptor>
+            verifier(layerName, { tensorInfo, tensorInfo }, { tensorInfo }, descriptor);
+    deserializedNetwork->ExecuteStrategy(verifier);
+}
+
+TEST_CASE("SerializeElementwiseBinary")
+{
+    using op = armnn::BinaryOperation;
+    std::initializer_list<op> allBinaryOperations = {op::Add, op::Div, op::Maximum, op::Minimum, op::Mul, op::Sub};
+
+    for (auto binaryOperation : allBinaryOperations)
+    {
+        SerializeElementwiseBinaryTest(binaryOperation);
+    }
+}
+
 void SerializeElementwiseUnaryTest(armnn::UnaryOperation unaryOperation)
 {
     auto layerName = GetUnaryOperationAsCString(unaryOperation);
@@ -2883,6 +2925,7 @@
                     CompareConstTensor(constants.at(0), m_LayerInput);
                     break;
                 }
+                case armnn::LayerType::ElementwiseBinary: break;
                 default:
                 {
                     throw armnn::Exception("Unexpected layer type in test model");
diff --git a/src/armnnTestUtils/CreateWorkload.hpp b/src/armnnTestUtils/CreateWorkload.hpp
index 0846d21..691adbf 100644
--- a/src/armnnTestUtils/CreateWorkload.hpp
+++ b/src/armnnTestUtils/CreateWorkload.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -125,7 +125,41 @@
     // Makes the workload and checks it.
     auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
 
-    DescriptorType queueDescriptor = workload->GetData();
+    auto queueDescriptor = workload->GetData();
+    CHECK(queueDescriptor.m_Inputs.size() == 2);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
+
+    // Returns so we can do extra, backend-specific tests.
+    return workload;
+}
+
+template <typename WorkloadType, armnn::DataType DataType>
+std::unique_ptr<WorkloadType> CreateElementwiseBinaryWorkloadTest(armnn::IWorkloadFactory & factory,
+                                                                  armnn::Graph & graph,
+                                                                  armnn::BinaryOperation binaryOperation)
+{
+    // Creates the layer we're testing.
+    ElementwiseBinaryDescriptor descriptor(binaryOperation);
+
+    Layer* const layer = graph.AddLayer<ElementwiseBinaryLayer>(descriptor, "layer");
+
+    // Creates extra layers.
+    Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
+    Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
+    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+    // Connects up.
+    armnn::TensorInfo tensorInfo({2, 3}, DataType);
+    Connect(input1, layer, tensorInfo, 0, 0);
+    Connect(input2, layer, tensorInfo, 0, 1);
+    Connect(layer, output, tensorInfo);
+    CreateTensorHandles(graph, factory);
+
+    // Makes the workload and checks it.
+    auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
+
+    auto queueDescriptor = workload->GetData();
     CHECK(queueDescriptor.m_Inputs.size() == 2);
     CHECK(queueDescriptor.m_Outputs.size() == 1);
 
@@ -191,6 +225,7 @@
     return workload;
 }
 
 template<typename WorkloadType,
          typename DescriptorType,
          armnn::DataType DataType>
diff --git a/src/armnnTestUtils/MockBackend.cpp b/src/armnnTestUtils/MockBackend.cpp
index 5dfe9a3..7441d0c 100644
--- a/src/armnnTestUtils/MockBackend.cpp
+++ b/src/armnnTestUtils/MockBackend.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -69,6 +69,7 @@
         case armnn::LayerType::Constant:
         case armnn::LayerType::Addition:
         case armnn::LayerType::Convolution2d:
+        case armnn::LayerType::ElementwiseBinary:
             // Layer supported
             return true;
         default:
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index b7e2762..dc5afca 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -1838,7 +1838,7 @@
     TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
     CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
 
-    IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
+    IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Maximum, layerName.c_str());
     ARMNN_ASSERT(layer != nullptr);
 
     TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
@@ -1868,7 +1868,7 @@
     TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
     CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
 
-    IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
+    IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Minimum, layerName.c_str());
     ARMNN_ASSERT(layer != nullptr);
 
     TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
@@ -2384,7 +2384,7 @@
     armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
 
     auto layerName = fmt::format("Sub:{}:{}", subgraphIndex, operatorIndex);
-    IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
+    IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Sub, layerName.c_str());
     ARMNN_ASSERT(layer != nullptr);
 
     TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
@@ -2416,7 +2416,7 @@
     armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
 
     auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
-    IConnectableLayer* layer = m_Network->AddDivisionLayer(layerName.c_str());
+    IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Div, layerName.c_str());
     ARMNN_ASSERT(layer != nullptr);
 
     TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
@@ -2444,7 +2444,7 @@
     armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
 
     auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
-    IConnectableLayer* layer = m_Network->AddDivisionLayer(layerName.c_str());
+    IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Div, layerName.c_str());
     ARMNN_ASSERT(layer != nullptr);
 
     TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
@@ -2475,7 +2475,7 @@
     armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
 
     auto layerName = fmt::format("Add:{}:{}", subgraphIndex, operatorIndex);
-    IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
+    IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Add, layerName.c_str());
     ARMNN_ASSERT(layer != nullptr);
 
     TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
@@ -2506,7 +2506,7 @@
     armnn::TensorInfo input1TensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
 
     auto layerName = fmt::format("Mul:{}:{}", subgraphIndex, operatorIndex);
-    IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
+    IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Mul, layerName.c_str());
     ARMNN_ASSERT(layer != nullptr);
 
     TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
diff --git a/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
index c9d6c71..599d353 100644
--- a/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -147,6 +147,27 @@
 }
 
 template<typename LayerType>
+LayerType* FuseElementwiseBinaryLayer(OptimizationViews& optimizationViews,
+                                      LayerType* baseLayer,
+                                      ActivationLayer* activationLayer,
+                                      ActivationDescriptor& activationDesc,
+                                      BinaryOperation operation,
+                                      std::string name)
+{
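+    // Creates a replacement ElementwiseBinary layer; FuseLayer attaches the activation and records the substitution.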
+    IConnectableLayer* replacement = optimizationViews.GetINetwork()->AddElementwiseBinaryLayer(operation,
+                                                                                                name.c_str());
+    LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
+
+    FuseLayer(optimizationViews,
+              baseLayer,
+              replacementLayer,
+              activationLayer,
+              activationDesc);
+
+    return replacementLayer;
+}
+
+template<typename LayerType>
 LayerType* FuseBatchNormalizationLayer(OptimizationViews& optimizationViews,
                                        LayerType* baseLayer,
                                        ActivationLayer* activationLayer,
diff --git a/src/backends/backendsCommon/CMakeLists.txt b/src/backends/backendsCommon/CMakeLists.txt
index 8d7e114..28ff205 100644
--- a/src/backends/backendsCommon/CMakeLists.txt
+++ b/src/backends/backendsCommon/CMakeLists.txt
@@ -1,5 +1,5 @@
 #
-# Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 # SPDX-License-Identifier: MIT
 #
 
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 62dfc6a..6a5963d 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -35,11 +35,8 @@
         case DataType::Float32:
             return DataType::Float32;
         case DataType::QAsymmS8:
-            return DataType::Signed32;
         case DataType::QAsymmU8:
-            return DataType::Signed32;
         case DataType::QSymmS8:
-            return DataType::Signed32;
         case DataType::QSymmS16:
             return DataType::Signed32;
         default:
@@ -3668,6 +3665,35 @@
     }
 }
 
+void ElementwiseBinaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    const std::string descriptorName{"ElementwiseBinaryQueueDescriptor"};
+
+    ValidateNumInputs(workloadInfo,  descriptorName, 2);
+    ValidateNumOutputs(workloadInfo, descriptorName, 1);
+
+    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
+    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
+    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
+
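+    // Both inputs must use one of the supported data types and must match the output's data type.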
+    std::vector<DataType> supportedTypes =
+            {
+                    DataType::BFloat16,
+                    DataType::Float16,
+                    DataType::Float32,
+                    DataType::QAsymmS8,
+                    DataType::QAsymmU8,
+                    DataType::QSymmS16,
+                    DataType::Signed32
+            };
+
+    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
+    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
+
+    ValidateTensorDataTypesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input", "output");
+    ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input", "output");
+}
+
 void ElementwiseUnaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
 {
     const std::string descriptorName{"ElementwiseUnaryQueueDescriptor"};
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 1283f67..51bc3e6 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -15,7 +15,6 @@
 #include <armnn/utility/TransformIterator.hpp>
 
 #include <armnn/backends/WorkloadFactory.hpp>
-#include <armnn/backends/TensorHandle.hpp>
 
 #include <sstream>
 
@@ -91,7 +90,8 @@
 
     auto backendFactory = backendRegistry.GetFactory(backendId);
     auto backendObject = backendFactory();
-    auto layerSupportObject = LayerSupportHandle(backendObject->GetLayerSupport(modelOptions), backendId);
+    auto layerSupport = backendObject->GetLayerSupport(modelOptions);
+    auto layerSupportObject = LayerSupportHandle(layerSupport, backendId);
 
     switch(layer.GetType())
     {
@@ -109,6 +109,7 @@
         }
         case LayerType::Addition:
         {
+            ARMNN_NO_DEPRECATE_WARN_BEGIN
             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
@@ -117,6 +118,7 @@
                                         OverrideDataType(input1, dataType),
                                         OverrideDataType(output, dataType),
                                         reason);
+            ARMNN_NO_DEPRECATE_WARN_END
             break;
         }
         case LayerType::ArgMinMax:
@@ -392,6 +394,24 @@
                                                                         reason);
             break;
         }
+        case LayerType::ElementwiseBinary:
+        {
+            auto cLayer = PolymorphicDowncast<const ElementwiseBinaryLayer*>(&layer);
+
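+            // Support is queried through the generic IsLayerSupported entry point using the layer's descriptor.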
+            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+            std::vector<TensorInfo> infos = { OverrideDataType(input0, dataType),
+                                              OverrideDataType(input1, dataType),
+                                              OverrideDataType(output, dataType) };
+            result = layerSupport->IsLayerSupported(LayerType::ElementwiseBinary,
+                                                    infos,
+                                                    cLayer->GetParameters(),
+                                                    EmptyOptional(),
+                                                    EmptyOptional(),
+                                                    reason);
+            break;
+        }
         case LayerType::ElementwiseUnary:
         {
             auto cLayer = PolymorphicDowncast<const ElementwiseUnaryLayer*>(&layer);
@@ -740,6 +760,7 @@
         }
         case LayerType::Maximum:
         {
+            ARMNN_NO_DEPRECATE_WARN_BEGIN
             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
@@ -748,6 +769,7 @@
                                                            OverrideDataType(input1, dataType),
                                                            OverrideDataType(output, dataType),
                                                            reason);
+            ARMNN_NO_DEPRECATE_WARN_END
             break;
         }
         case LayerType::MemCopy:
@@ -814,6 +836,7 @@
         }
         case LayerType::Multiplication:
         {
+            ARMNN_NO_DEPRECATE_WARN_BEGIN
             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
@@ -822,6 +845,7 @@
                                                OverrideDataType(input1, dataType),
                                                OverrideDataType(output, dataType),
                                                reason);
+            ARMNN_NO_DEPRECATE_WARN_END
             break;
         }
         case LayerType::Normalization:
@@ -1052,6 +1076,7 @@
         }
         case LayerType::Division:
         {
+            ARMNN_NO_DEPRECATE_WARN_BEGIN
             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
@@ -1060,6 +1085,7 @@
                                          OverrideDataType(input1, dataType),
                                          OverrideDataType(output, dataType),
                                          reason);
+            ARMNN_NO_DEPRECATE_WARN_END
             break;
         }
         case LayerType::Rank:
@@ -1254,6 +1280,7 @@
         }
         case LayerType::Subtraction:
         {
+            ARMNN_NO_DEPRECATE_WARN_BEGIN
             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
@@ -1262,6 +1289,7 @@
                                             OverrideDataType(input1, dataType),
                                             OverrideDataType(output, dataType),
                                             reason);
+            ARMNN_NO_DEPRECATE_WARN_END
             break;
         }
         case LayerType::Switch:
@@ -1291,6 +1319,7 @@
         }
         case LayerType::Minimum:
         {
+            ARMNN_NO_DEPRECATE_WARN_BEGIN
             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
@@ -1298,6 +1327,7 @@
                                                            OverrideDataType(input1, dataType),
                                                            OverrideDataType(output, dataType),
                                                            reason);
+            ARMNN_NO_DEPRECATE_WARN_END
             break;
         }
         case LayerType::Prelu:
@@ -1670,6 +1700,11 @@
             auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
             return CreateDivision(*divisionQueueDescriptor, info);
         }
+        case LayerType::ElementwiseBinary:
+        {
+            auto queueDescriptor = PolymorphicDowncast<const ElementwiseBinaryQueueDescriptor*>(&descriptor);
+            return CreateWorkload(LayerType::ElementwiseBinary, *queueDescriptor, info);
+        }
         case LayerType::ElementwiseUnary:
         {
             auto elementwiseUnaryQueueDescriptor
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 3545331..986d253 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -1,5 +1,5 @@
 #
-# Copyright © 2017 ARM Ltd. All rights reserved.
+# Copyright © 2017-2023 ARM Ltd and Contributors. All rights reserved.
 # SPDX-License-Identifier: MIT
 #
 
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 509157a..77335d5 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -24,6 +24,7 @@
     DetectionPostProcessEndToEndTestImpl.hpp
     DynamicBackendTests.cpp
     DynamicBackendTests.hpp
+    ElementwiseBinaryEndToEndTestImpl.hpp
     ElementwiseUnaryEndToEndTestImpl.hpp
     EndToEndTestImpl.hpp
     FillEndToEndTestImpl.hpp
diff --git a/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp
new file mode 100644
index 0000000..6546a6a
--- /dev/null
+++ b/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp
@@ -0,0 +1,107 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "CommonTestUtils.hpp"
+
+#include <ResolveType.hpp>
+
+#include <armnn/INetwork.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
+#include <doctest/doctest.h>
+
+#include <vector>
+
+namespace
+{
+
+template<armnn::DataType ArmnnTypeInput>
+INetworkPtr CreateElementwiseBinaryNetwork(const TensorShape& input1Shape,
+                                          const TensorShape& input2Shape,
+                                          const TensorShape& outputShape,
+                                          BinaryOperation operation,
+                                          const float qScale = 1.0f,
+                                          const int32_t qOffset = 0)
+{
+    using namespace armnn;
+
+    INetworkPtr net(INetwork::Create());
+
+    TensorInfo input1TensorInfo(input1Shape, ArmnnTypeInput, qScale, qOffset, true);
+    TensorInfo input2TensorInfo(input2Shape, ArmnnTypeInput, qScale, qOffset, true);
+    TensorInfo outputTensorInfo(outputShape, ArmnnTypeInput, qScale, qOffset);
+
+    IConnectableLayer* input1 = net->AddInputLayer(armnn::numeric_cast<LayerBindingId>(0));
+    IConnectableLayer* input2 = net->AddInputLayer(armnn::numeric_cast<LayerBindingId>(1));
+    IConnectableLayer* elementwiseBinaryLayer = net->AddElementwiseBinaryLayer(operation, "elementwiseBinary");
+    IConnectableLayer* output = net->AddOutputLayer(0, "output");
+
+    Connect(input1, elementwiseBinaryLayer, input1TensorInfo, 0, 0);
+    Connect(input2, elementwiseBinaryLayer, input2TensorInfo, 0, 1);
+    Connect(elementwiseBinaryLayer, output, outputTensorInfo, 0, 0);
+
+    return net;
+}
+
+template<armnn::DataType ArmnnInType,
+         typename TInput = armnn::ResolveType<ArmnnInType>>
+void ElementwiseBinarySimpleEndToEnd(const std::vector<BackendId>& backends,
+                                     BinaryOperation operation)
+{
+    using namespace armnn;
+
+    const float   qScale  = IsQuantizedType<TInput>() ? 0.25f : 1.0f;
+    const int32_t qOffset = IsQuantizedType<TInput>() ? 50    : 0;
+
+    const TensorShape input1Shape = { 2, 2, 2, 2 };
+    const TensorShape input2Shape = { 1 };
+    const TensorShape outputShape = { 2, 2, 2, 2 };
+
+    // Builds up the structure of the network
+    INetworkPtr net = CreateElementwiseBinaryNetwork<ArmnnInType>(input1Shape, input2Shape, outputShape,
+                                                                  operation, qScale, qOffset);
+
+    CHECK(net);
+
+    const std::vector<float> input1({ 1, -1, 1, 1,  5, -5, 5, 5,  -3, 3, 3, 3,  4, 4, -4, 4 });
+
+    const std::vector<float> input2({ 2 });
+    std::vector<float> expectedOutput;
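+    // Computes the reference output for the selected operation; input2 ({ 2 }) is broadcast across input1.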
+    switch (operation)
+    {
+        case armnn::BinaryOperation::Add:
+            expectedOutput = { 3, 1, 3, 3,  7, -3, 7, 7,  -1, 5, 5, 5,  6, 6, -2, 6 };
+            break;
+        case armnn::BinaryOperation::Div:
+            expectedOutput = { 0.5f, -0.5f, 0.5f, 0.5f,  2.5f, -2.5f, 2.5f, 2.5f,
+                               -1.5f, 1.5f, 1.5f, 1.5f,  2, 2, -2, 2 };
+            break;
+        case armnn::BinaryOperation::Maximum:
+            expectedOutput = { 2, 2, 2, 2,  5, 2, 5, 5,  2, 3, 3, 3,  4, 4, 2, 4 };
+            break;
+        case armnn::BinaryOperation::Minimum:
+            expectedOutput = { 1, -1, 1, 1,  2, -5, 2, 2,  -3, 2, 2, 2,  2, 2, -4, 2 };
+            break;
+        case armnn::BinaryOperation::Mul:
+            expectedOutput = { 2, -2, 2, 2,  10, -10, 10, 10,  -6, 6, 6, 6,  8, 8, -8, 8 };
+            break;
+        case armnn::BinaryOperation::Sub:
+            expectedOutput = { -1, -3, -1, -1,  3, -7, 3, 3,  -5, 1, 1, 1,  2, 2, -6, 2 };
+            break;
+        default:
+            throw armnn::InvalidArgumentException("Invalid Elementwise Binary operation");
+    }
+
+    // Quantize the input and expected-output data.
+    std::vector<TInput> qInput1Data     = armnnUtils::QuantizedVector<TInput>(input1, qScale, qOffset);
+    std::vector<TInput> qInput2Data     = armnnUtils::QuantizedVector<TInput>(input2, qScale, qOffset);
+    std::vector<TInput> qExpectedOutput = armnnUtils::QuantizedVector<TInput>(expectedOutput, qScale, qOffset);
+
+    std::map<int, std::vector<TInput>> inputTensorData    = {{ 0, qInput1Data }, { 1, qInput2Data }};
+    std::map<int, std::vector<TInput>> expectedOutputData = {{ 0, qExpectedOutput }};
+
+    EndToEndLayerTestImpl<ArmnnInType, ArmnnInType>(std::move(net), inputTensorData, expectedOutputData, backends);
+}
+
+} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index fb7a027..5b95d3c 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -664,6 +664,8 @@
 
 DECLARE_LAYER_POLICY_2_PARAM(DetectionPostProcess)
 
+DECLARE_LAYER_POLICY_2_PARAM(ElementwiseBinary)
+
 DECLARE_LAYER_POLICY_2_PARAM(ElementwiseUnary)
 
 DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index cd865de..5e619df 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -21,7 +21,7 @@
 
     //Defines layers.
     auto input = net->AddInputLayer(0);
-    auto add = net->AddAdditionLayer();
+    auto add = net->AddElementwiseBinaryLayer(armnn::BinaryOperation::Add);
     auto output = net->AddOutputLayer(0);
 
     // Connects layers.
@@ -54,7 +54,7 @@
         "    edge [fontsize=8 fontcolor=\"blue\" fontname=\"arial-bold\"];\n"
         "    " << inputId << " [label=\"{Input|Guid : " << inputId << "\\lLayerType : Input\\l"
                              "BackendID : CpuRef\\l}\"];\n"
-        "    " << addId << " [label=\"{Addition|Guid : " << addId << "\\lLayerType : Addition\\l"
+        "    " << addId << " [label=\"{ElementwiseBinary|Guid : " << addId << "\\lLayerType : ElementwiseBinary\\l"
                            "BackendID : CpuRef\\l}\"];\n"
         "    " << outputId << " [label=\"{Output|Guid : " << outputId << "\\lLayerType : Output\\l"
                               "BackendID : CpuRef\\l}\"];\n"
@@ -187,7 +187,7 @@
     layer->GetOutputSlot(0).SetTensorInfo(desc);
 
     armnn::IConnectableLayer* prevLayer = layer;
-    layer = net->AddMultiplicationLayer("ml");
+    layer = net->AddElementwiseBinaryLayer(armnn::BinaryOperation::Mul, "ml");
 
     prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
     normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
@@ -258,7 +258,7 @@
     layer->GetOutputSlot(0).SetTensorInfo(desc);
 
     armnn::IConnectableLayer* prevLayer = layer;
-    layer = net->AddMultiplicationLayer("ml");
+    layer = net->AddElementwiseBinaryLayer(armnn::BinaryOperation::Mul, "ml");
 
     prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
     normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
diff --git a/src/backends/backendsCommon/test/mockBackend/MockImportLayerSupport.hpp b/src/backends/backendsCommon/test/mockBackend/MockImportLayerSupport.hpp
index 380ce4a..da4b7ab 100644
--- a/src/backends/backendsCommon/test/mockBackend/MockImportLayerSupport.hpp
+++ b/src/backends/backendsCommon/test/mockBackend/MockImportLayerSupport.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -16,7 +16,7 @@
 public:
     bool IsLayerSupported(const LayerType& type,
                           const std::vector<TensorInfo>& infos,
-                          const BaseDescriptor& /*descriptor*/,
+                          const BaseDescriptor& descriptor,
                           const Optional<LstmInputParamsInfo>& /*lstmParamsInfo*/,
                           const Optional<QuantizedLstmInputParamsInfo>& /*quantizedLstmParamsInfo*/,
                           Optional<std::string&> reasonIfUnsupported) const override
@@ -25,6 +25,11 @@
         {
             case LayerType::Addition:
                 return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+            case LayerType::ElementwiseBinary:
+            {
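+                // The mock import backend only reports support for the Add operation.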
+                auto elementwiseDesc = *(PolymorphicDowncast<const ElementwiseBinaryDescriptor*>(&descriptor));
+                return (elementwiseDesc.m_Operation == BinaryOperation::Add);
+            }
             case LayerType::Input:
                 return IsInputSupported(infos[0], reasonIfUnsupported);
             case LayerType::Output:
diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp
index d2e8fbf..a10b6fb 100644
--- a/src/backends/cl/ClBackend.cpp
+++ b/src/backends/cl/ClBackend.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -311,7 +311,8 @@
         if ((base.GetType() == LayerType::DepthwiseConvolution2d || base.GetType() == LayerType::Convolution2d
             || base.GetType() == LayerType::BatchNormalization || base.GetType() == LayerType::FullyConnected
             || base.GetType() == LayerType::Addition || base.GetType() == LayerType::Multiplication
-            || base.GetType() == LayerType::Subtraction || base.GetType() == LayerType::Division)
+            || base.GetType() == LayerType::Subtraction || base.GetType() == LayerType::Division
+            || base.GetType() == LayerType::ElementwiseBinary)
             && (base.GetAdditionalInformation<ActivationDescriptor>() == nullptr))
         {
             for (auto output = base.BeginOutputSlots(); output != base.EndOutputSlots(); ++output)
@@ -542,6 +543,90 @@
                                     untouched.erase(activationLayer->GetGuid());
                                 }
                             }
+                            else if (base.GetType() == LayerType::ElementwiseBinary)
+                            {
+                                ElementwiseBinaryLayer* baseLayer = PolymorphicDowncast<ElementwiseBinaryLayer*>(&base);
+
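+                                // Validate the binary operation together with the activation against the CL workload before fusing.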
+                                if (baseLayer->GetParameters().m_Operation == BinaryOperation::Add)
+                                {
+                                    arm_compute::Status status = ClAdditionValidate(
+                                            baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            &activationDesc);
+
+                                    if (status)
+                                    {
+                                        FuseElementwiseBinaryLayer<ElementwiseBinaryLayer>(optimizationViews,
+                                                                                           baseLayer,
+                                                                                           activationLayer,
+                                                                                           activationDesc,
+                                                                                           BinaryOperation::Add,
+                                                                                           name);
+                                        untouched.erase(baseLayer->GetGuid());
+                                        untouched.erase(activationLayer->GetGuid());
+                                    }
+                                }
+                                else if (baseLayer->GetParameters().m_Operation == BinaryOperation::Div)
+                                {
+                                    arm_compute::Status status = ClDivisionWorkloadValidate(
+                                            baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            &activationDesc);
+
+                                    if (status)
+                                    {
+                                        FuseElementwiseBinaryLayer<ElementwiseBinaryLayer>(optimizationViews,
+                                                                                           baseLayer,
+                                                                                           activationLayer,
+                                                                                           activationDesc,
+                                                                                           BinaryOperation::Div,
+                                                                                           name);
+                                        untouched.erase(baseLayer->GetGuid());
+                                        untouched.erase(activationLayer->GetGuid());
+                                    }
+                                }
+                                else if (baseLayer->GetParameters().m_Operation == BinaryOperation::Mul)
+                                {
+                                    arm_compute::Status status = ClMultiplicationWorkloadValidate(
+                                            baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            &activationDesc);
+
+                                    if (status)
+                                    {
+                                        FuseElementwiseBinaryLayer<ElementwiseBinaryLayer>(optimizationViews,
+                                                                                           baseLayer,
+                                                                                           activationLayer,
+                                                                                           activationDesc,
+                                                                                           BinaryOperation::Mul,
+                                                                                           name);
+                                        untouched.erase(baseLayer->GetGuid());
+                                        untouched.erase(activationLayer->GetGuid());
+                                    }
+                                }
+                                else if (baseLayer->GetParameters().m_Operation == BinaryOperation::Sub)
+                                {
+                                    arm_compute::Status status = ClSubtractionValidate(
+                                            baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            &activationDesc);
+
+                                    if (status)
+                                    {
+                                        FuseElementwiseBinaryLayer<ElementwiseBinaryLayer>(optimizationViews,
+                                                                                           baseLayer,
+                                                                                           activationLayer,
+                                                                                           activationDesc,
+                                                                                           BinaryOperation::Sub,
+                                                                                           name);
+                                        untouched.erase(baseLayer->GetGuid());
+                                        untouched.erase(activationLayer->GetGuid());
+                                    }
+                                }
+                                // No fusion available for other BinaryOperations
+                            }
                         }
                     }
                 }
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index cb2d756..89bcf9b 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -347,6 +347,56 @@
             return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
         case LayerType::Division:
             return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::ElementwiseBinary:
+        {
+            auto desc = *(PolymorphicDowncast<const ElementwiseBinaryDescriptor *>(&descriptor));
+
+            switch (desc.m_Operation)
+            {
+                case BinaryOperation::Add:
+                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
+                                                   reasonIfUnsupported,
+                                                   infos[0],
+                                                   infos[1],
+                                                   infos[2],
+                                                   nullptr);
+                case BinaryOperation::Div:
+                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
+                                                   reasonIfUnsupported,
+                                                   infos[0],
+                                                   infos[1],
+                                                   infos[2],
+                                                   nullptr);
+                case BinaryOperation::Minimum:
+                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
+                                                   reasonIfUnsupported,
+                                                   infos[0],
+                                                   infos[1],
+                                                   infos[2]);
+                case BinaryOperation::Maximum:
+                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
+                                                   reasonIfUnsupported,
+                                                   infos[0],
+                                                   infos[1],
+                                                   infos[2]);
+                case BinaryOperation::Mul:
+                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
+                                                   reasonIfUnsupported,
+                                                   infos[0],
+                                                   infos[1],
+                                                   infos[2],
+                                                   nullptr);
+                case BinaryOperation::Sub:
+                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
+                                                   reasonIfUnsupported,
+                                                   infos[0],
+                                                   infos[1],
+                                                   infos[2],
+                                                   nullptr);
+                default:
+                    return false;
+            }
+        }
         case LayerType::ElementwiseUnary:
             return IsElementwiseUnarySupported(infos[0],
                                                infos[1],
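
A note on the switch cases added here and in NeonLayerSupport.cpp below: they contain no break statements because FORWARD_WORKLOAD_VALIDATE_FUNC returns from IsLayerSupported. Roughly, and simplified from the real macro in the ACL-common backend headers, its effect is:

    // Simplified sketch only; the IsWorkloadSupported helper converts the
    // arm_compute::Status into a bool and copies any error description into
    // reasonIfUnsupported.
    #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
        return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
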
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 6bf510a..0228677 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "ClWorkloadFactory.hpp"
@@ -405,6 +405,75 @@
             auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
             return std::make_unique<ClDivisionWorkload>(*divisionQueueDescriptor, info, m_CLCompileContext);
         }
+        case LayerType::ElementwiseBinary :
+        {
+            auto elementwiseBinaryQueueDescriptor
+                    = PolymorphicDowncast<const ElementwiseBinaryQueueDescriptor*>(&descriptor);
+
+            switch (elementwiseBinaryQueueDescriptor->m_Parameters.m_Operation)
+            {
+                case BinaryOperation::Add:
+                {
+                    AdditionQueueDescriptor additionQueueDescriptor;
+                    additionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
+                    additionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
+                    additionQueueDescriptor.m_AdditionalInfoObject =
+                            elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
+                    return std::make_unique<ClAdditionWorkload>(additionQueueDescriptor, info, m_CLCompileContext);
+                }
+                case BinaryOperation::Div:
+                {
+                    DivisionQueueDescriptor divisionQueueDescriptor;
+                    divisionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
+                    divisionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
+                    divisionQueueDescriptor.m_AdditionalInfoObject =
+                            elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
+                    return std::make_unique<ClDivisionWorkload>(divisionQueueDescriptor, info, m_CLCompileContext);
+                }
+                case BinaryOperation::Maximum:
+                {
+                    MaximumQueueDescriptor maximumQueueDescriptor;
+                    maximumQueueDescriptor.m_Inputs = descriptor.m_Inputs;
+                    maximumQueueDescriptor.m_Outputs = descriptor.m_Outputs;
+                    maximumQueueDescriptor.m_AdditionalInfoObject =
+                            elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
+                    return std::make_unique<ClMaximumWorkload>(maximumQueueDescriptor, info, m_CLCompileContext);
+                }
+                case BinaryOperation::Minimum:
+                {
+                    MinimumQueueDescriptor minimumQueueDescriptor;
+                    minimumQueueDescriptor.m_Inputs = descriptor.m_Inputs;
+                    minimumQueueDescriptor.m_Outputs = descriptor.m_Outputs;
+                    minimumQueueDescriptor.m_AdditionalInfoObject =
+                            elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
+                    return std::make_unique<ClMinimumWorkload>(minimumQueueDescriptor, info, m_CLCompileContext);
+                }
+                case BinaryOperation::Mul:
+                {
+                    MultiplicationQueueDescriptor multiplicationQueueDescriptor;
+                    multiplicationQueueDescriptor.m_Inputs = descriptor.m_Inputs;
+                    multiplicationQueueDescriptor.m_Outputs = descriptor.m_Outputs;
+                    multiplicationQueueDescriptor.m_AdditionalInfoObject =
+                            elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
+                    return std::make_unique<ClMultiplicationWorkload>(multiplicationQueueDescriptor,
+                                                                      info,
+                                                                      m_CLCompileContext);
+                }
+                case BinaryOperation::Sub:
+                {
+                    SubtractionQueueDescriptor subtractionQueueDescriptor;
+                    subtractionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
+                    subtractionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
+                    subtractionQueueDescriptor.m_AdditionalInfoObject =
+                            elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
+                    return std::make_unique<ClSubtractionWorkload>(subtractionQueueDescriptor,
+                                                                   info,
+                                                                   m_CLCompileContext);
+                }
+                default:
+                    return nullptr;
+            }
+        }
         case LayerType::ElementwiseUnary :
         {
             auto elementwiseUnaryQueueDescriptor
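
The CL factory (and the Neon factory further down) maps the single ElementwiseBinaryQueueDescriptor onto the legacy per-operation descriptors so the existing ACL workloads are reused unchanged. The six blocks differ only in the descriptor and workload types; a hypothetical helper along these lines could factor out the repetition (names are illustrative and not part of the patch; it assumes every legacy descriptor inherits m_Inputs, m_Outputs and m_AdditionalInfoObject from QueueDescriptor):

    template <typename LegacyQueueDescriptor, typename Workload, typename... ExtraArgs>
    std::unique_ptr<IWorkload> MakeLegacyElementwiseWorkload(const QueueDescriptor& descriptor,
                                                             const WorkloadInfo& info,
                                                             ExtraArgs&&... extraArgs)
    {
        LegacyQueueDescriptor legacyDescriptor;
        legacyDescriptor.m_Inputs               = descriptor.m_Inputs;
        legacyDescriptor.m_Outputs              = descriptor.m_Outputs;
        legacyDescriptor.m_AdditionalInfoObject = descriptor.m_AdditionalInfoObject;
        return std::make_unique<Workload>(legacyDescriptor, info, std::forward<ExtraArgs>(extraArgs)...);
    }

    // e.g. case BinaryOperation::Add:
    //     return MakeLegacyElementwiseWorkload<AdditionQueueDescriptor,
    //                                          ClAdditionWorkload>(descriptor, info, m_CLCompileContext);
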
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index adea733..c49ca23 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -66,19 +66,17 @@
 }
 
 template <typename WorkloadType,
-          typename DescriptorType,
-          typename LayerType,
           armnn::DataType DataType>
-static void ClCreateElementwiseWorkloadTest()
+static void ClCreateElementwiseWorkloadTest(BinaryOperation binaryOperator)
 {
     Graph graph;
     ClWorkloadFactory factory =
         ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
 
-    auto workload = CreateElementwiseWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(factory, graph);
+    auto workload = CreateElementwiseBinaryWorkloadTest<WorkloadType, DataType>(factory, graph, binaryOperator);
 
     // Checks that inputs/outputs are as we expect them (see definition of CreateElementwiseWorkloadTest).
-    DescriptorType queueDescriptor = workload->GetData();
+    auto queueDescriptor = workload->GetData();
     auto inputHandle1 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto inputHandle2 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
@@ -93,73 +91,55 @@
 TEST_CASE_FIXTURE(ClContextControlFixture, "CreateAdditionFloatWorkload")
 {
     ClCreateElementwiseWorkloadTest<ClAdditionWorkload,
-                                    AdditionQueueDescriptor,
-                                    AdditionLayer,
-                                    armnn::DataType::Float32>();
+                                    armnn::DataType::Float32>(BinaryOperation::Add);
 }
 
 TEST_CASE_FIXTURE(ClContextControlFixture, "CreateAdditionFloat16Workload")
 {
     ClCreateElementwiseWorkloadTest<ClAdditionWorkload,
-                                    AdditionQueueDescriptor,
-                                    AdditionLayer,
-                                    armnn::DataType::Float16>();
+                                    armnn::DataType::Float16>(BinaryOperation::Add);
 }
 
 TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSubtractionFloatWorkload")
 {
     ClCreateElementwiseWorkloadTest<ClSubtractionWorkload,
-                                    SubtractionQueueDescriptor,
-                                    SubtractionLayer,
-                                    armnn::DataType::Float32>();
+                                    armnn::DataType::Float32>(BinaryOperation::Sub);
 }
 
 TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSubtractionFloat16Workload")
 {
     ClCreateElementwiseWorkloadTest<ClSubtractionWorkload,
-                                    SubtractionQueueDescriptor,
-                                    SubtractionLayer,
-                                    armnn::DataType::Float16>();
+                                    armnn::DataType::Float16>(BinaryOperation::Sub);
 }
 
 TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMultiplicationFloatWorkloadTest")
 {
     ClCreateElementwiseWorkloadTest<ClMultiplicationWorkload,
-                                    MultiplicationQueueDescriptor,
-                                    MultiplicationLayer,
-                                    armnn::DataType::Float32>();
+                                    armnn::DataType::Float32>(BinaryOperation::Mul);
 }
 
 TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMultiplicationFloat16WorkloadTest")
 {
     ClCreateElementwiseWorkloadTest<ClMultiplicationWorkload,
-                                    MultiplicationQueueDescriptor,
-                                    MultiplicationLayer,
-                                    armnn::DataType::Float16>();
+                                    armnn::DataType::Float16>(BinaryOperation::Mul);
 }
 
 TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMultiplicationUint8WorkloadTest")
 {
     ClCreateElementwiseWorkloadTest<ClMultiplicationWorkload,
-                                    MultiplicationQueueDescriptor,
-                                    MultiplicationLayer,
-                                    armnn::DataType::QAsymmU8>();
+                                    armnn::DataType::QAsymmU8>(BinaryOperation::Mul);
 }
 
 TEST_CASE_FIXTURE(ClContextControlFixture, "CreateDivisionFloatWorkloadTest")
 {
     ClCreateElementwiseWorkloadTest<ClDivisionWorkload,
-                                    DivisionQueueDescriptor,
-                                    DivisionLayer,
-                                    armnn::DataType::Float32>();
+                                    armnn::DataType::Float32>(BinaryOperation::Div);
 }
 
 TEST_CASE_FIXTURE(ClContextControlFixture, "CreateDivisionFloat16WorkloadTest")
 {
     ClCreateElementwiseWorkloadTest<ClDivisionWorkload,
-                                    DivisionQueueDescriptor,
-                                    DivisionLayer,
-                                    armnn::DataType::Float16>();
+                                    armnn::DataType::Float16>(BinaryOperation::Div);
 }
 
 template <typename WorkloadType, 
diff --git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp
index 51a983a..9443116 100644
--- a/src/backends/cl/test/ClFallbackTests.cpp
+++ b/src/backends/cl/test/ClFallbackTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -24,8 +24,8 @@
     IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
     IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
     IConnectableLayer* input2 = net->AddInputLayer(2, "input2");
-    IConnectableLayer* add = net->AddAdditionLayer("add");
-    IConnectableLayer* sub = net->AddSubtractionLayer("sub");
+    IConnectableLayer* add = net->AddElementwiseBinaryLayer(BinaryOperation::Add, "add");
+    IConnectableLayer* sub = net->AddElementwiseBinaryLayer(BinaryOperation::Sub, "sub");
     IConnectableLayer* output = net->AddOutputLayer(0, "output");
 
     input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
@@ -172,8 +172,8 @@
     IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
     IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
     IConnectableLayer* input2 = net->AddInputLayer(2, "input2");
-    IConnectableLayer* add = net->AddAdditionLayer("add");
-    IConnectableLayer* sub = net->AddSubtractionLayer("sub");
+    IConnectableLayer* add = net->AddElementwiseBinaryLayer(BinaryOperation::Add, "add");
+    IConnectableLayer* sub = net->AddElementwiseBinaryLayer(BinaryOperation::Sub, "sub");
     IConnectableLayer* output = net->AddOutputLayer(0, "output");
 
     input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
@@ -301,8 +301,8 @@
     IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
     IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
     IConnectableLayer* input2 = net->AddInputLayer(2, "input2");
-    IConnectableLayer* add = net->AddAdditionLayer("add");
-    IConnectableLayer* sub = net->AddSubtractionLayer("sub");
+    IConnectableLayer* add = net->AddElementwiseBinaryLayer(BinaryOperation::Add, "add");
+    IConnectableLayer* sub = net->AddElementwiseBinaryLayer(BinaryOperation::Sub, "sub");
     IConnectableLayer* pooling = net->AddPooling2dLayer(desc, "pooling");
     IConnectableLayer* output = net->AddOutputLayer(0, "output");
 
@@ -460,8 +460,8 @@
     IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
     IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
     IConnectableLayer* input2 = net->AddInputLayer(2, "input2");
-    IConnectableLayer* add = net->AddAdditionLayer("add");
-    IConnectableLayer* sub = net->AddSubtractionLayer("sub");
+    IConnectableLayer* add = net->AddElementwiseBinaryLayer(BinaryOperation::Add, "add");
+    IConnectableLayer* sub = net->AddElementwiseBinaryLayer(BinaryOperation::Sub, "sub");
     IConnectableLayer* pooling = net->AddPooling2dLayer(desc, "pooling");
     IConnectableLayer* output = net->AddOutputLayer(0, "output");
 
diff --git a/src/backends/cl/test/Fp16SupportTest.cpp b/src/backends/cl/test/Fp16SupportTest.cpp
index da6ea10..28ae479 100644
--- a/src/backends/cl/test/Fp16SupportTest.cpp
+++ b/src/backends/cl/test/Fp16SupportTest.cpp
@@ -28,7 +28,7 @@
     Layer* const inputLayer1 = graph.AddLayer<InputLayer>(1, "input1");
     Layer* const inputLayer2 = graph.AddLayer<InputLayer>(2, "input2");
 
-    Layer* const additionLayer = graph.AddLayer<AdditionLayer>("addition");
+    Layer* const additionLayer = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "addition");
     Layer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "output");
 
     TensorInfo fp16TensorInfo({1, 2, 3, 5}, armnn::DataType::Float16);
@@ -57,7 +57,7 @@
 
    IConnectableLayer* inputLayer1 = net->AddInputLayer(0);
    IConnectableLayer* inputLayer2 = net->AddInputLayer(1);
-   IConnectableLayer* additionLayer = net->AddAdditionLayer();
+   IConnectableLayer* additionLayer = net->AddElementwiseBinaryLayer(BinaryOperation::Add);
    IConnectableLayer* outputLayer = net->AddOutputLayer(0);
 
    inputLayer1->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0));
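
The test changes above all follow the same replacement: a dedicated Addition/Subtraction/Multiplication layer becomes an ElementwiseBinaryLayer parameterised by a BinaryOperation. For reference, a minimal network built through the public API now reads as follows (a sketch only; shapes are illustrative):

    using namespace armnn;

    INetworkPtr net = INetwork::Create();

    IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
    IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
    IConnectableLayer* add    = net->AddElementwiseBinaryLayer(BinaryOperation::Add, "add");
    IConnectableLayer* output = net->AddOutputLayer(0, "output");

    input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    TensorInfo info({ 1, 2, 3, 4 }, DataType::Float32);
    input0->GetOutputSlot(0).SetTensorInfo(info);
    input1->GetOutputSlot(0).SetTensorInfo(info);
    add->GetOutputSlot(0).SetTensorInfo(info);
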
diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp
index 968bce4..cea2aa3 100644
--- a/src/backends/neon/NeonBackend.cpp
+++ b/src/backends/neon/NeonBackend.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -395,6 +395,92 @@
                                     untouched.erase(activationLayer->GetGuid());
                                 }
                             }
+                            else if (base.GetType() == LayerType::ElementwiseBinary)
+                            {
+                                ElementwiseBinaryLayer* baseLayer = PolymorphicDowncast<ElementwiseBinaryLayer*>(&base);
+
+                                if (baseLayer->GetParameters().m_Operation == BinaryOperation::Add)
+                                {
+                                    arm_compute::Status status = NeonAdditionWorkloadValidate(
+                                            baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            &activationDesc);
+
+                                    if (status)
+                                    {
+                                        FuseElementwiseBinaryLayer<ElementwiseBinaryLayer>(optimizationViews,
+                                                                                           baseLayer,
+                                                                                           activationLayer,
+                                                                                           activationDesc,
+                                                                                           BinaryOperation::Add,
+                                                                                           name);
+                                        untouched.erase(baseLayer->GetGuid());
+                                        untouched.erase(activationLayer->GetGuid());
+                                    }
+                                }
+                                else if (baseLayer->GetParameters().m_Operation == BinaryOperation::Div)
+                                {
+                                    arm_compute::Status status = NeonDivisionWorkloadValidate(
+                                            baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            &activationDesc);
+
+                                    if (status)
+                                    {
+                                        FuseElementwiseBinaryLayer<ElementwiseBinaryLayer>(optimizationViews,
+                                                                                           baseLayer,
+                                                                                           activationLayer,
+                                                                                           activationDesc,
+                                                                                           BinaryOperation::Div,
+                                                                                           name);
+                                        untouched.erase(baseLayer->GetGuid());
+                                        untouched.erase(activationLayer->GetGuid());
+                                    }
+                                }
+                                else if (baseLayer->GetParameters().m_Operation == BinaryOperation::Mul)
+                                {
+                                    arm_compute::Status status = NeonMultiplicationWorkloadValidate(
+                                            baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            &activationDesc);
+
+                                    if (status)
+                                    {
+                                        FuseElementwiseBinaryLayer<ElementwiseBinaryLayer>(optimizationViews,
+                                                                                           baseLayer,
+                                                                                           activationLayer,
+                                                                                           activationDesc,
+                                                                                           BinaryOperation::Mul,
+                                                                                           name);
+                                        untouched.erase(baseLayer->GetGuid());
+                                        untouched.erase(activationLayer->GetGuid());
+                                    }
+                                }
+                                else if (baseLayer->GetParameters().m_Operation == BinaryOperation::Sub)
+                                {
+                                    arm_compute::Status status = NeonSubtractionWorkloadValidate(
+                                            baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+                                            &activationDesc);
+
+                                    if (status)
+                                    {
+                                        FuseElementwiseBinaryLayer<ElementwiseBinaryLayer>(optimizationViews,
+                                                                                           baseLayer,
+                                                                                           activationLayer,
+                                                                                           activationDesc,
+                                                                                           BinaryOperation::Sub,
+                                                                                           name);
+                                        untouched.erase(baseLayer->GetGuid());
+                                        untouched.erase(activationLayer->GetGuid());
+                                    }
+                                }
+                                // No fusion available for other BinaryOperations
+                            }
                         }
                     }
                 }
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index ee155a2..672b2f3 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -346,6 +346,56 @@
         }
         case LayerType::Division:
             return support.IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::ElementwiseBinary:
+        {
+            auto desc = *(PolymorphicDowncast<const ElementwiseBinaryDescriptor *>(&descriptor));
+
+            switch (desc.m_Operation)
+            {
+                case BinaryOperation::Add:
+                    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAdditionWorkloadValidate,
+                                                   reasonIfUnsupported,
+                                                   infos[0],
+                                                   infos[1],
+                                                   infos[2],
+                                                   nullptr);
+                case BinaryOperation::Div:
+                    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDivisionWorkloadValidate,
+                                                   reasonIfUnsupported,
+                                                   infos[0],
+                                                   infos[1],
+                                                   infos[2],
+                                                   nullptr);
+                case BinaryOperation::Maximum:
+                    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMaximumWorkloadValidate,
+                                                   reasonIfUnsupported,
+                                                   infos[0],
+                                                   infos[1],
+                                                   infos[2]);
+                case BinaryOperation::Minimum:
+                    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMinimumWorkloadValidate,
+                                                   reasonIfUnsupported,
+                                                   infos[0],
+                                                   infos[1],
+                                                   infos[2]);
+                case BinaryOperation::Mul:
+                    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMultiplicationWorkloadValidate,
+                                                   reasonIfUnsupported,
+                                                   infos[0],
+                                                   infos[1],
+                                                   infos[2],
+                                                   nullptr);
+                case BinaryOperation::Sub:
+                    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
+                                                   reasonIfUnsupported,
+                                                   infos[0],
+                                                   infos[1],
+                                                   infos[2],
+                                                   nullptr);
+                default:
+                    return false;
+            }
+        }
         case LayerType::ElementwiseUnary:
             return support.IsElementwiseUnarySupported(infos[0],
                                                        infos[1],
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index dccd4a3..08168ec 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -280,6 +280,59 @@
             auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
             return std::make_unique<NeonDivisionWorkload>(*divisionQueueDescriptor, info);
         }
+        case LayerType::ElementwiseBinary :
+        {
+            auto elementwiseBinaryQueueDescriptor
+                    = PolymorphicDowncast<const ElementwiseBinaryQueueDescriptor*>(&descriptor);
+
+            switch (elementwiseBinaryQueueDescriptor->m_Parameters.m_Operation)
+            {
+                case BinaryOperation::Add:
+                {
+                    AdditionQueueDescriptor additionQueueDescriptor;
+                    additionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
+                    additionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
+                    return std::make_unique<NeonAdditionWorkload>(additionQueueDescriptor, info);
+                }
+                case BinaryOperation::Div:
+                {
+                    DivisionQueueDescriptor divisionQueueDescriptor;
+                    divisionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
+                    divisionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
+                    return std::make_unique<NeonDivisionWorkload>(divisionQueueDescriptor, info);
+                }
+                case BinaryOperation::Maximum:
+                {
+                    MaximumQueueDescriptor maximumQueueDescriptor;
+                    maximumQueueDescriptor.m_Inputs = descriptor.m_Inputs;
+                    maximumQueueDescriptor.m_Outputs = descriptor.m_Outputs;
+                    return std::make_unique<NeonMaximumWorkload>(maximumQueueDescriptor, info);
+                }
+                case BinaryOperation::Minimum:
+                {
+                    MinimumQueueDescriptor minimumQueueDescriptor;
+                    minimumQueueDescriptor.m_Inputs = descriptor.m_Inputs;
+                    minimumQueueDescriptor.m_Outputs = descriptor.m_Outputs;
+                    return std::make_unique<NeonMinimumWorkload>(minimumQueueDescriptor, info);
+                }
+                case BinaryOperation::Mul:
+                {
+                    MultiplicationQueueDescriptor multiplicationQueueDescriptor;
+                    multiplicationQueueDescriptor.m_Inputs = descriptor.m_Inputs;
+                    multiplicationQueueDescriptor.m_Outputs = descriptor.m_Outputs;
+                    return std::make_unique<NeonMultiplicationWorkload>(multiplicationQueueDescriptor, info);
+                }
+                case BinaryOperation::Sub:
+                {
+                    SubtractionQueueDescriptor subtractionQueueDescriptor;
+                    subtractionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
+                    subtractionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
+                    return std::make_unique<NeonSubtractionWorkload>(subtractionQueueDescriptor, info);
+                }
+                default:
+                    return nullptr;
+            }
+        }
         case LayerType::ElementwiseUnary :
         {
             auto elementwiseUnaryQueueDescriptor
diff --git a/src/backends/neon/test/NeonFallbackTests.cpp b/src/backends/neon/test/NeonFallbackTests.cpp
index 8e0e0ab..40df2dc 100644
--- a/src/backends/neon/test/NeonFallbackTests.cpp
+++ b/src/backends/neon/test/NeonFallbackTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -38,8 +38,8 @@
     IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
     IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
     IConnectableLayer* input2 = net->AddInputLayer(2, "input2");
-    IConnectableLayer* add = net->AddAdditionLayer("add");
-    IConnectableLayer* sub = net->AddSubtractionLayer("sub");
+    IConnectableLayer* add = net->AddElementwiseBinaryLayer(BinaryOperation::Add, "add");
+    IConnectableLayer* sub = net->AddElementwiseBinaryLayer(BinaryOperation::Sub, "sub");
     IConnectableLayer* output = net->AddOutputLayer(0, "output");
 
     input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
@@ -183,7 +183,7 @@
 
     IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
     IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
-    IConnectableLayer* add = net->AddAdditionLayer("add");
+    IConnectableLayer* add = net->AddElementwiseBinaryLayer(BinaryOperation::Add, "add");
     IConnectableLayer* pooling = net->AddPooling2dLayer(desc, "pooling");
     IConnectableLayer* output = net->AddOutputLayer(0, "output");
 
@@ -318,8 +318,8 @@
     IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
     IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
     IConnectableLayer* input2 = net->AddInputLayer(2, "input2");
-    IConnectableLayer* sub = net->AddSubtractionLayer("sub");
-    IConnectableLayer* add = net->AddAdditionLayer("add");
+    IConnectableLayer* sub = net->AddElementwiseBinaryLayer(BinaryOperation::Sub, "sub");
+    IConnectableLayer* add = net->AddElementwiseBinaryLayer(BinaryOperation::Add, "add");
     IConnectableLayer* output = net->AddOutputLayer(0, "output");
 
     input0->GetOutputSlot(0).Connect(sub->GetInputSlot(0));
@@ -465,7 +465,7 @@
     IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
     IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
     IConnectableLayer* pooling = net->AddPooling2dLayer(desc, "pooling");
-    IConnectableLayer* add = net->AddAdditionLayer("add");
+    IConnectableLayer* add = net->AddElementwiseBinaryLayer(BinaryOperation::Add, "add");
     IConnectableLayer* output = net->AddOutputLayer(0, "output");
 
     input0->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
@@ -599,8 +599,8 @@
     IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
     IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
     IConnectableLayer* input2 = net->AddInputLayer(2, "input2");
-    IConnectableLayer* sub = net->AddSubtractionLayer("sub");
-    IConnectableLayer* add = net->AddAdditionLayer("add");
+    IConnectableLayer* sub = net->AddElementwiseBinaryLayer(BinaryOperation::Sub, "sub");
+    IConnectableLayer* add = net->AddElementwiseBinaryLayer(BinaryOperation::Add, "add");
     IConnectableLayer* output = net->AddOutputLayer(0, "output");
 
     input0->GetOutputSlot(0).Connect(sub->GetInputSlot(0));
@@ -725,8 +725,8 @@
     IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
     IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
     IConnectableLayer* input2 = net->AddInputLayer(2, "input2");
-    IConnectableLayer* add = net->AddAdditionLayer("add");
-    IConnectableLayer* sub = net->AddSubtractionLayer("sub");
+    IConnectableLayer* add = net->AddElementwiseBinaryLayer(BinaryOperation::Add, "add");
+    IConnectableLayer* sub = net->AddElementwiseBinaryLayer(BinaryOperation::Sub, "sub");
     IConnectableLayer* output = net->AddOutputLayer(0, "output");
 
     input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
@@ -878,8 +878,8 @@
     IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
     IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
     IConnectableLayer* input2 = net->AddInputLayer(2, "input2");
-    IConnectableLayer* add = net->AddAdditionLayer("add");
-    IConnectableLayer* sub = net->AddSubtractionLayer("sub");
+    IConnectableLayer* add = net->AddElementwiseBinaryLayer(BinaryOperation::Add, "add");
+    IConnectableLayer* sub = net->AddElementwiseBinaryLayer(BinaryOperation::Sub, "sub");
     IConnectableLayer* output = net->AddOutputLayer(0, "output");
 
     input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
@@ -1013,8 +1013,8 @@
     IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
     IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
     IConnectableLayer* input2 = net->AddInputLayer(2, "input2");
-    IConnectableLayer* add = net->AddAdditionLayer("add");
-    IConnectableLayer* sub = net->AddSubtractionLayer("sub");
+    IConnectableLayer* add = net->AddElementwiseBinaryLayer(BinaryOperation::Add, "add");
+    IConnectableLayer* sub = net->AddElementwiseBinaryLayer(BinaryOperation::Sub, "sub");
     IConnectableLayer* pooling = net->AddPooling2dLayer(desc, "pooling");
     IConnectableLayer* output = net->AddOutputLayer(0, "output");
 
@@ -1177,8 +1177,8 @@
     IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
     IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
     IConnectableLayer* input2 = net->AddInputLayer(2, "input2");
-    IConnectableLayer* add = net->AddAdditionLayer("add");
-    IConnectableLayer* sub = net->AddSubtractionLayer("sub");
+    IConnectableLayer* add = net->AddElementwiseBinaryLayer(BinaryOperation::Add, "add");
+    IConnectableLayer* sub = net->AddElementwiseBinaryLayer(BinaryOperation::Sub, "sub");
     IConnectableLayer* pooling = net->AddPooling2dLayer(desc, "pooling");
     IConnectableLayer* output = net->AddOutputLayer(0, "output");
 
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index a5015a7..cbc6723 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -189,6 +189,36 @@
             return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
         case LayerType::Division:
             return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::ElementwiseBinary:
+        {
+            std::array<DataType, 6> supportedTypes =
+                    {
+                            DataType::Float32,
+                            DataType::Float16,
+                            DataType::QAsymmS8,
+                            DataType::QAsymmU8,
+                            DataType::QSymmS16,
+                            DataType::Signed32
+                    };
+
+            bool supported = true;
+            supported &= CheckSupportRule(TypeAnyOf(infos[0], supportedTypes), reasonIfUnsupported,
+                                          "Reference elementwise binary: input type not supported");
+
+            supported &= CheckSupportRule(TypeAnyOf(infos[1], supportedTypes), reasonIfUnsupported,
+                                          "Reference elementwise binary: input type not supported");
+
+            supported &= CheckSupportRule(TypeAnyOf(infos[2], supportedTypes), reasonIfUnsupported,
+                                          "Reference elementwise binary: output type not supported");
+
+            supported &= CheckSupportRule(TypesAreEqual(infos[0], infos[1]), reasonIfUnsupported,
+                                          "Reference elementwise binary: input types not matching");
+
+            supported &= CheckSupportRule(TypesAreEqual(infos[0], infos[2]), reasonIfUnsupported,
+                                          "Reference elementwise binary: input and output types not matching");
+
+            return supported;
+        }
         case LayerType::ElementwiseUnary:
             return IsElementwiseUnarySupported(infos[0],
                                                infos[1],
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index f0e9e35..8e1f68e 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index bfe37d7..10f623e 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include <Layer.hpp>
@@ -302,6 +302,12 @@
                 return std::make_unique<RefDivisionWorkload<float>>(*divisionQueueDescriptor, info);
             }
         }
+        case LayerType::ElementwiseBinary:
+        {
+            auto elementwiseBinaryQueueDescriptor
+                    = PolymorphicDowncast<const ElementwiseBinaryQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefElementwiseBinaryWorkload>(*elementwiseBinaryQueueDescriptor, info);
+        }
         case LayerType::ElementwiseUnary:
         {
             auto elementwiseUnaryQueueDescriptor
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index eb2ec2d..c23984c 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -67,6 +67,7 @@
         workloads/RefDepthwiseConvolution2dWorkload.cpp \
         workloads/RefDequantizeWorkload.cpp \
         workloads/RefDetectionPostProcessWorkload.cpp \
+        workloads/RefElementwiseBinaryWorkload.cpp \
         workloads/RefElementwiseWorkload.cpp \
         workloads/RefElementwiseUnaryWorkload.cpp \
         workloads/RefFakeQuantizationFloat32Workload.cpp \
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 6ff5771..8bf414f 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -18,6 +18,7 @@
 #include <backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp>
 #include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp>
 #include <backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp>
+#include <backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp>
 #include <backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp>
 #include <backendsCommon/test/FillEndToEndTestImpl.hpp>
 #include <backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp>
@@ -185,7 +186,7 @@
 
     IConnectableLayer* input1 = net->AddInputLayer(0);
     IConnectableLayer* input2 = net->AddInputLayer(1);
-    IConnectableLayer* add    = net->AddAdditionLayer();
+    IConnectableLayer* add    = net->AddElementwiseBinaryLayer(ElementwiseBinaryDescriptor(BinaryOperation::Add));
     IConnectableLayer* output = net->AddOutputLayer(0);
 
     input1->GetOutputSlot(0).Connect(add->GetInputSlot(0));
@@ -347,7 +348,7 @@
 
     IConnectableLayer* input1 = net->AddInputLayer(0);
     IConnectableLayer* input2 = net->AddInputLayer(1);
-    IConnectableLayer* min    = net->AddMinimumLayer();
+    IConnectableLayer* min    = net->AddElementwiseBinaryLayer(ElementwiseBinaryDescriptor(BinaryOperation::Minimum));
     IConnectableLayer* output = net->AddOutputLayer(0);
 
     input1->GetOutputSlot(0).Connect(min->GetInputSlot(0));
@@ -1547,6 +1548,55 @@
 {
     armnn::experimental::StridedSlicedEndToEndTest<armnn::DataType::Float32>(defaultBackends, 3);
 }
+
+TEST_CASE("RefAddEndToEndTestFloat32")
+{
+    ElementwiseBinarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, BinaryOperation::Add);
+}
+TEST_CASE("RefAddEndToEndTestUint8")
+{
+    ElementwiseBinarySimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, BinaryOperation::Add);
+}
+TEST_CASE("RefDivEndToEndTestFloat32")
+{
+    ElementwiseBinarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, BinaryOperation::Div);
+}
+TEST_CASE("RefDivEndToEndTestUint8")
+{
+    ElementwiseBinarySimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, BinaryOperation::Div);
+}
+TEST_CASE("RefMulEndToEndTestFloat32")
+{
+    ElementwiseBinarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, BinaryOperation::Mul);
+}
+TEST_CASE("RefMulEndToEndTestUint8")
+{
+    ElementwiseBinarySimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, BinaryOperation::Mul);
+}
+TEST_CASE("RefSubEndToEndTestFloat32")
+{
+    ElementwiseBinarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, BinaryOperation::Sub);
+}
+TEST_CASE("RefSubEndToEndTestUint8")
+{
+    ElementwiseBinarySimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, BinaryOperation::Sub);
+}
+TEST_CASE("RefMaximumEndToEndTestFloat32")
+{
+    ElementwiseBinarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, BinaryOperation::Maximum);
+}
+TEST_CASE("RefMaximumEndToEndTestUint8")
+{
+    ElementwiseBinarySimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, BinaryOperation::Maximum);
+}
+TEST_CASE("RefMinimumEndToEndTestFloat32")
+{
+    ElementwiseBinarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, BinaryOperation::Minimum);
+}
+TEST_CASE("RefMinimumEndToEndTestUint8")
+{
+    ElementwiseBinarySimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, BinaryOperation::Minimum);
+}
 #endif
 
 }
diff --git a/src/backends/reference/test/RefOptimizedNetworkTests.cpp b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
index 7ca1e0e..7e8064f 100644
--- a/src/backends/reference/test/RefOptimizedNetworkTests.cpp
+++ b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -53,7 +53,7 @@
     layer->GetOutputSlot(0).SetTensorInfo(desc);
 
     armnn::IConnectableLayer* prevLayer = layer;
-    layer = net->AddMultiplicationLayer("ml");
+    layer = net->AddElementwiseBinaryLayer(armnn::BinaryOperation::Mul, "ml");
 
     prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
     normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index de6c042..3592f22 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -1,5 +1,5 @@
 #
-# Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 # SPDX-License-Identifier: MIT
 #
 
@@ -108,6 +108,8 @@
     RefDequantizeWorkload.hpp
     RefDetectionPostProcessWorkload.cpp
     RefDetectionPostProcessWorkload.hpp
+    RefElementwiseBinaryWorkload.cpp
+    RefElementwiseBinaryWorkload.hpp
     RefElementwiseUnaryWorkload.cpp
     RefElementwiseUnaryWorkload.hpp
     RefFakeQuantizationFloat32Workload.cpp
diff --git a/src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp b/src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp
new file mode 100644
index 0000000..5dc77f8
--- /dev/null
+++ b/src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp
@@ -0,0 +1,120 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefElementwiseBinaryWorkload.hpp"
+
+#include "Decoders.hpp"
+#include "ElementwiseFunction.hpp"
+#include "Encoders.hpp"
+#include "RefWorkloadUtils.hpp"
+#include "Maximum.hpp"
+#include "Minimum.hpp"
+
+#include <Profiling.hpp>
+
+#include <armnn/TypesUtils.hpp>
+
+#include <functional>
+
+namespace armnn
+{
+
+template<typename DataType>
+void ExecuteFunction(std::vector<ITensorHandle*> inputs,
+                     std::vector<ITensorHandle*> outputs,
+                     BinaryOperation operation)
+{
+    const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
+    const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
+    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
+
+    const TensorShape& inShape0 = inputInfo0.GetShape();
+    const TensorShape& inShape1 = inputInfo1.GetShape();
+    const TensorShape& outShape = outputInfo.GetShape();
+
+    std::unique_ptr<Decoder<DataType>> input0 = MakeDecoder<DataType>(inputInfo0, inputs[0]->Map());
+    std::unique_ptr<Decoder<DataType>> input1 = MakeDecoder<DataType>(inputInfo1, inputs[1]->Map());
+    std::unique_ptr<Encoder<DataType>> output = MakeEncoder<DataType>(outputInfo, outputs[0]->Map());
+
+    using AddFunction     = ElementwiseBinaryFunction<std::plus<DataType>>;
+    using DivFunction     = ElementwiseBinaryFunction<std::divides<DataType>>;
+    using MaximumFunction = ElementwiseBinaryFunction<armnn::maximum<DataType>>;
+    using MinimumFunction = ElementwiseBinaryFunction<armnn::minimum<DataType>>;
+    using MulFunction     = ElementwiseBinaryFunction<std::multiplies<DataType>>;
+    using SubFunction     = ElementwiseBinaryFunction<std::minus<DataType>>;
+
+    switch (operation)
+    {
+        case BinaryOperation::Add:
+        {
+            AddFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
+            break;
+        }
+        case BinaryOperation::Div:
+        {
+            DivFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
+            break;
+        }
+        case BinaryOperation::Maximum:
+        {
+            MaximumFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
+            break;
+        }
+        case BinaryOperation::Minimum:
+        {
+            MinimumFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
+            break;
+        }
+        case BinaryOperation::Mul:
+        {
+            MulFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
+            break;
+        }
+        case BinaryOperation::Sub:
+        {
+            SubFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
+            break;
+        }
+        default:
+        {
+            throw InvalidArgumentException(std::string("Unsupported binary operation ") +
+                                           GetBinaryOperationAsCString(operation), CHECK_LOCATION());
+        }
+    }
+}
+
+RefElementwiseBinaryWorkload::RefElementwiseBinaryWorkload(const ElementwiseBinaryQueueDescriptor& desc,
+                                                           const WorkloadInfo& info)
+    : RefBaseWorkload<ElementwiseBinaryQueueDescriptor>(desc, info)
+{}
+
+void RefElementwiseBinaryWorkload::Execute() const
+{
+    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
+}
+
+void RefElementwiseBinaryWorkload::ExecuteAsync(ExecutionData& executionData)
+{
+
+    WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+    Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
+}
+
+void RefElementwiseBinaryWorkload::Execute(std::vector<ITensorHandle*> inputs,
+                                           std::vector<ITensorHandle*> outputs) const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefElementwiseBinaryWorkload_Execute");
+
+    if (GetTensorInfo(inputs[0]).GetDataType() == DataType::Signed32)
+    {
+        ExecuteFunction<int32_t>(inputs, outputs, m_Data.m_Parameters.m_Operation);
+    }
+    else
+    {
+        ExecuteFunction<float>(inputs, outputs, m_Data.m_Parameters.m_Operation);
+    }
+}
+
+} // namespace armnn
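
Stripped of ArmNN's Decoder/Encoder and broadcast plumbing, the operation-to-functor mapping in ExecuteFunction above reduces to standard-library functors. A standalone, non-ArmNN illustration over plain floats (element-wise only, no broadcasting):

    // Standalone sketch of the BinaryOperation -> functor mapping; not ArmNN code.
    #include <algorithm>
    #include <functional>
    #include <stdexcept>
    #include <vector>

    enum class BinaryOp { Add, Div, Maximum, Minimum, Mul, Sub };

    std::vector<float> Apply(BinaryOp op, const std::vector<float>& a, const std::vector<float>& b)
    {
        std::function<float(float, float)> f;
        switch (op)
        {
            case BinaryOp::Add:     f = std::plus<float>();       break;
            case BinaryOp::Div:     f = std::divides<float>();    break;
            case BinaryOp::Maximum: f = [](float x, float y) { return std::max(x, y); }; break;
            case BinaryOp::Minimum: f = [](float x, float y) { return std::min(x, y); }; break;
            case BinaryOp::Mul:     f = std::multiplies<float>(); break;
            case BinaryOp::Sub:     f = std::minus<float>();      break;
            default: throw std::invalid_argument("Unsupported binary operation");
        }

        std::vector<float> out(a.size());
        std::transform(a.begin(), a.end(), b.begin(), out.begin(), f);
        return out;
    }

In the workload itself, armnn::maximum and armnn::minimum play the role of the max/min lambdas, and ElementwiseBinaryFunction additionally handles shape broadcasting between the two inputs.
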
diff --git a/src/backends/reference/workloads/RefElementwiseBinaryWorkload.hpp b/src/backends/reference/workloads/RefElementwiseBinaryWorkload.hpp
new file mode 100644
index 0000000..37458a1
--- /dev/null
+++ b/src/backends/reference/workloads/RefElementwiseBinaryWorkload.hpp
@@ -0,0 +1,29 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "BaseIterator.hpp"
+
+#include "RefBaseWorkload.hpp"
+#include <armnn/backends/WorkloadData.hpp>
+
+namespace armnn
+{
+
+class RefElementwiseBinaryWorkload : public RefBaseWorkload<ElementwiseBinaryQueueDescriptor>
+{
+public:
+    using RefBaseWorkload<ElementwiseBinaryQueueDescriptor>::m_Data;
+
+    RefElementwiseBinaryWorkload(const ElementwiseBinaryQueueDescriptor& descriptor, const WorkloadInfo& info);
+    void Execute() const override;
+    void ExecuteAsync(ExecutionData& executionData) override;
+
+private:
+    void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
+};
+
+} // namespace armnn
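
The split between the two Execute overloads and ExecuteAsync declared above follows the usual Ref workload pattern: the synchronous path reads the handles captured in m_Data, while the asynchronous path reads them from caller-owned working memory. A standalone sketch of that pattern with toy types (not ArmNN code):

    // Sketch of the sync/async execution split used by the workload above.
    #include <utility>
    #include <vector>

    struct Handles
    {
        std::vector<int*> m_Inputs;
        std::vector<int*> m_Outputs;
    };

    class MiniWorkload
    {
    public:
        explicit MiniWorkload(Handles data) : m_Data(std::move(data)) {}

        // Synchronous path: use the handles owned by the workload.
        void Execute() const { Execute(m_Data.m_Inputs, m_Data.m_Outputs); }

        // Asynchronous path: use handles supplied per invocation.
        void ExecuteAsync(Handles& workingMem) { Execute(workingMem.m_Inputs, workingMem.m_Outputs); }

    private:
        void Execute(const std::vector<int*>& inputs, const std::vector<int*>& outputs) const
        {
            *outputs[0] = *inputs[0] + *inputs[1]; // placeholder computation
        }

        Handles m_Data;
    };

Routing the async path through caller-supplied working memory lets the same workload object be executed concurrently with different input/output buffers, which is presumably why the private Execute takes the handle vectors as parameters rather than reading m_Data directly.
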
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index afed71b..dba880b 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -26,6 +26,7 @@
 #include "RefDetectionPostProcessWorkload.hpp"
 #include "RefDequantizeWorkload.hpp"
 #include "RefElementwiseWorkload.hpp"
+#include "RefElementwiseBinaryWorkload.hpp"
 #include "RefElementwiseUnaryWorkload.hpp"
 #include "RefFakeQuantizationFloat32Workload.hpp"
 #include "RefFillWorkload.hpp"