IVGCVSW-7172 Add ElementwiseBinary (Subtraction & Multiplication) support to TOSA Reference Backend

 * Removed AdditionOperator and moved to new ElementwiseBinaryOperator.

Signed-off-by: Nikhil Raj <nikhil.raj@arm.com>
Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I8ce20f7575d68334aadcd176827bca3db53d0052
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 5fcc8b5..d251bd2 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -1,5 +1,5 @@
 #
-# Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2017-2022 Arm Ltd and Contributors. All rights reserved.
 # SPDX-License-Identifier: MIT
 #
 
@@ -41,6 +41,7 @@
     LogSoftmaxEndToEndTestImpl.hpp
     MemoryManagerTests.cpp
     MockBackendId.hpp
+    MultiplicationEndToEndTestImpl.hpp
     OptimizeSubgraphViewTests.cpp
     OptimizationViewsTests.cpp
     PreluEndToEndTestImpl.hpp
@@ -57,6 +58,7 @@
     SpaceToDepthEndToEndTestImpl.hpp
     SplitterEndToEndTestImpl.hpp
     StridedSliceAsyncEndToEndTest.hpp
+    SubtractionEndToEndTestImpl.hpp
     TransposeEndToEndTestImpl.hpp
     TensorCopyUtils.hpp
     WorkloadFactoryHelper.hpp
diff --git a/src/backends/backendsCommon/test/MultiplicationEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/MultiplicationEndToEndTestImpl.hpp
new file mode 100644
index 0000000..40442e2
--- /dev/null
+++ b/src/backends/backendsCommon/test/MultiplicationEndToEndTestImpl.hpp
@@ -0,0 +1,96 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/INetwork.hpp>
+
+#include <CommonTestUtils.hpp>
+#include <ResolveType.hpp>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+template<typename armnn::DataType DataType>
+armnn::INetworkPtr CreateMultiplicationNetwork(const armnn::TensorShape& inputXShape,
+                                               const armnn::TensorShape& inputYShape,
+                                               const armnn::TensorShape& outputShape,
+                                               const float qScale = 1.0f,
+                                               const int32_t qOffset = 0)
+{
+    using namespace armnn;
+
+    INetworkPtr network(INetwork::Create());
+
+    TensorInfo inputXTensorInfo(inputXShape, DataType, qScale, qOffset, true);
+    TensorInfo inputYTensorInfo(inputYShape, DataType, qScale, qOffset, true);
+
+    TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset);
+
+
+    IConnectableLayer* multiplication = network->AddMultiplicationLayer("multiplication");
+    IConnectableLayer* inputX = network->AddInputLayer(0, "inputX");
+    IConnectableLayer* inputY = network->AddInputLayer(1, "inputY");
+    IConnectableLayer* output = network->AddOutputLayer(0, "output");
+
+    Connect(inputX, multiplication, inputXTensorInfo, 0, 0);
+    Connect(inputY, multiplication, inputYTensorInfo, 0, 1);
+    Connect(multiplication, output, outputTensorInfo, 0, 0);
+
+    return network;
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void MultiplicationEndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+    using namespace armnn;
+
+    const TensorShape& inputXShape = { 2, 2 };
+    const TensorShape& inputYShape = { 2, 2 };
+    const TensorShape& outputShape = { 2, 2 };
+
+    INetworkPtr network = CreateMultiplicationNetwork<ArmnnType>(inputXShape, inputYShape, outputShape);
+
+    CHECK(network);
+
+    std::vector<T> inputXData{ 1, 2, 3, 4 };
+    std::vector<T> inputYData{ 5, 2, 6, 3 };
+    std::vector<T> expectedOutput{ 5, 4, 18, 12 };
+
+    std::map<int, std::vector<T>> inputTensorData = {{ 0, inputXData }, { 1, inputYData }};
+    std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput } };
+
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensorData, expectedOutputData, backends);
+}
+
+template<armnn::DataType ArmnnType>
+void MultiplicationEndToEndFloat16(const std::vector<armnn::BackendId>& backends)
+{
+    using namespace armnn;
+    using namespace half_float::literal;
+    using Half = half_float::half;
+
+    const TensorShape& inputXShape = { 2, 2 };
+    const TensorShape& inputYShape = { 2, 2 };
+    const TensorShape& outputShape = { 2, 2 };
+
+    INetworkPtr network = CreateMultiplicationNetwork<ArmnnType>(inputXShape, inputYShape, outputShape);
+    CHECK(network);
+
+    std::vector<Half> inputXData{ 1._h, 2._h,
+                                  3._h, 4._h };
+    std::vector<Half> inputYData{ 1._h, 2._h,
+                                  3._h, 4._h };
+    std::vector<Half> expectedOutput{ 1._h, 4._h,
+                                      9._h, 16._h };
+
+    std::map<int, std::vector<Half>> inputTensorData = {{ 0, inputXData }, { 1, inputYData }};
+    std::map<int, std::vector<Half>> expectedOutputData = { { 0, expectedOutput } };
+
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensorData, expectedOutputData, backends);
+}
+
+} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/SubtractionEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/SubtractionEndToEndTestImpl.hpp
new file mode 100644
index 0000000..747fe26
--- /dev/null
+++ b/src/backends/backendsCommon/test/SubtractionEndToEndTestImpl.hpp
@@ -0,0 +1,96 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/INetwork.hpp>
+
+#include <CommonTestUtils.hpp>
+#include <ResolveType.hpp>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+template<typename armnn::DataType DataType>
+armnn::INetworkPtr CreateSubtractionNetwork(const armnn::TensorShape& inputXShape,
+                                            const armnn::TensorShape& inputYShape,
+                                            const armnn::TensorShape& outputShape,
+                                            const float qScale = 1.0f,
+                                            const int32_t qOffset = 0)
+{
+    using namespace armnn;
+
+    INetworkPtr network(INetwork::Create());
+
+    TensorInfo inputXTensorInfo(inputXShape, DataType, qScale, qOffset, true);
+    TensorInfo inputYTensorInfo(inputYShape, DataType, qScale, qOffset, true);
+
+    TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset);
+
+
+    IConnectableLayer* subtraction = network->AddSubtractionLayer("subtraction");
+    IConnectableLayer* inputX = network->AddInputLayer(0, "inputX");
+    IConnectableLayer* inputY = network->AddInputLayer(1, "inputY");
+    IConnectableLayer* output = network->AddOutputLayer(0, "output");
+
+    Connect(inputX, subtraction, inputXTensorInfo, 0, 0);
+    Connect(inputY, subtraction, inputYTensorInfo, 0, 1);
+    Connect(subtraction, output, outputTensorInfo, 0, 0);
+
+    return network;
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void SubtractionEndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+    using namespace armnn;
+
+    const TensorShape& inputXShape = { 2, 2 };
+    const TensorShape& inputYShape = { 2, 2 };
+    const TensorShape& outputShape = { 2, 2 };
+
+    INetworkPtr network = CreateSubtractionNetwork<ArmnnType>(inputXShape, inputYShape, outputShape);
+
+    CHECK(network);
+
+    std::vector<T> inputXData{ 10, 11, 12, 13 };
+    std::vector<T> inputYData{ 5, 7, 6, 8 };
+    std::vector<T> expectedOutput{ 5, 4, 6, 5 };
+
+    std::map<int, std::vector<T>> inputTensorData = {{ 0, inputXData }, { 1, inputYData }};
+    std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput } };
+
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensorData, expectedOutputData, backends);
+}
+
+template<armnn::DataType ArmnnType>
+void SubtractionEndToEndFloat16(const std::vector<armnn::BackendId>& backends)
+{
+    using namespace armnn;
+    using namespace half_float::literal;
+    using Half = half_float::half;
+
+    const TensorShape& inputXShape = { 2, 2 };
+    const TensorShape& inputYShape = { 2, 2 };
+    const TensorShape& outputShape = { 2, 2 };
+
+    INetworkPtr network = CreateSubtractionNetwork<ArmnnType>(inputXShape, inputYShape, outputShape);
+    CHECK(network);
+
+    std::vector<Half> inputXData{ 11._h, 12._h,
+                                  13._h, 14._h };
+    std::vector<Half> inputYData{ 5._h, 7._h,
+                                  6._h, 8._h };
+    std::vector<Half> expectedOutput{ 6._h, 5._h,
+                                      7._h, 6._h };
+
+    std::map<int, std::vector<Half>> inputTensorData = {{ 0, inputXData }, { 1, inputYData }};
+    std::map<int, std::vector<Half>> expectedOutputData = { { 0, expectedOutput } };
+
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensorData, expectedOutputData, backends);
+}
+
+} // anonymous namespace
diff --git a/src/backends/tosaCommon/TosaMappings.cpp b/src/backends/tosaCommon/TosaMappings.cpp
index 1452e4a..b0f8fd9 100644
--- a/src/backends/tosaCommon/TosaMappings.cpp
+++ b/src/backends/tosaCommon/TosaMappings.cpp
@@ -24,8 +24,10 @@
     switch (type)
     {
         case LayerType::Addition:
+        case LayerType::Multiplication:
+        case LayerType::Subtraction:
         {
-            return ConvertAdditionToTosaOperator(layer, inputs, outputs);
+            return ConvertElementwiseBinaryToTosaOperator(layer, type, inputs, outputs);
         }
         case LayerType::Concat:
         {
diff --git a/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp b/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp
deleted file mode 100644
index 7014886..0000000
--- a/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp
+++ /dev/null
@@ -1,72 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "AdditionOperator.hpp"
-
-TosaSerializationBasicBlock* ConvertAdditionToTosaOperator(const Layer* layer,
-                                                           const std::vector<const TensorInfo*>& inputs,
-                                                           const std::vector<const TensorInfo*>& outputs)
-{
-    std::string input0Name = std::string("input0_");
-    std::string input1Name = std::string("input1_");
-    std::string outputName = std::string("output0_");
-    std::string blockName  = std::string("Op_ADD_block_") + GetUniqueTosaMappingID();
-
-    // If a layer is present then the block will be used for execution, so input and output names need to be determined
-    // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
-    if(layer != nullptr)
-    {
-        // Get the layers connected to the input slots and determine unique tensors names.
-        Layer& connectedLayer0 = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
-        input0Name = GenerateUniqueName(connectedLayer0, 0);
-
-        Layer& connectedLayer1 = layer->GetInputSlot(1).GetConnectedOutputSlot()->GetOwningLayer();
-        input1Name = GenerateUniqueName(connectedLayer1, 1);
-
-        // Determine unique output tensor name.
-        outputName = GenerateUniqueOutputName(*layer, 0);
-    }
-
-    auto* op = new TosaSerializationOperator(Op_ADD,
-                                             Attribute_NONE,
-                                             nullptr,
-                                             {input0Name, input1Name},
-                                             {outputName});
-
-
-    std::vector<TosaSerializationTensor*> tensors;
-
-    // Only add input tensors if connected layer is an input layer.
-    // As intermediate or constant tensors will be created separately.
-    // There also can't be duplicate tensor.
-    if(input0Name.find("input0_") != std::string::npos)
-    {
-        std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
-        DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
-
-        tensors.push_back(new TosaSerializationTensor(input0Name, inputShape0, inputDType0, {}));
-    }
-
-    if(input1Name.find("input1_") != std::string::npos)
-    {
-        std::vector<int32_t> inputShape1 = GetTosaTensorShape(inputs[1]->GetShape());
-        DType inputDType1 = ArmNNToDType(inputs[1]->GetDataType());
-
-        tensors.push_back(new TosaSerializationTensor(input1Name, inputShape1, inputDType1, {}));
-    }
-
-    std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
-    DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
-
-    tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
-
-    // operatorInputNames/operatorOutputNames ends up being the same as
-    // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings
-    return new TosaSerializationBasicBlock(blockName, // name
-                                           {op}, // operators
-                                           tensors, // tensors
-                                           {input0Name, input1Name}, // inputs
-                                           {outputName}); // outputs
-}
\ No newline at end of file
diff --git a/src/backends/tosaCommon/operatorMappings/AdditionOperator.hpp b/src/backends/tosaCommon/operatorMappings/AdditionOperator.hpp
deleted file mode 100644
index 5eb7441..0000000
--- a/src/backends/tosaCommon/operatorMappings/AdditionOperator.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TosaOperatorUtils.hpp"
-
-#include <Layer.hpp>
-
-#include <tosa_serialization_handler.h>
-
-using namespace armnn;
-using namespace tosa;
-
-TosaSerializationBasicBlock* ConvertAdditionToTosaOperator(const Layer* layer,
-                                                           const std::vector<const TensorInfo*>& inputs,
-                                                           const std::vector<const TensorInfo*>& outputs);
-
diff --git a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
index 2443dc0..6e897aa 100644
--- a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
+++ b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
@@ -4,8 +4,6 @@
 #
 
 list(APPEND armnnTosaBackendOperators_sources
-        AdditionOperator.hpp
-        AdditionOperator.cpp
         AvgPool2DIgnoreValueOperator.hpp
         AvgPool2DIgnoreValueOperator.cpp
         ConcatOperator.hpp
@@ -14,6 +12,8 @@
         ConstantOperator.cpp
         Conv2dOperator.hpp
         Conv2dOperator.cpp
+        ElementwiseBinaryOperator.hpp
+        ElementwiseBinaryOperator.cpp
         Pooling2DOperator.hpp
         Pooling2DOperator.cpp
         ReshapeOperator.hpp
diff --git a/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp b/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp
new file mode 100644
index 0000000..9909e66
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp
@@ -0,0 +1,103 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ElementwiseBinaryOperator.hpp"
+
+TosaSerializationBasicBlock* ConvertElementwiseBinaryToTosaOperator(const Layer* layer,
+                                                                    const LayerType type,
+                                                                    const std::vector<const TensorInfo*>& inputs,
+                                                                    const std::vector<const TensorInfo*>& outputs)
+{
+    std::string input0Name = std::string("input0_");
+    std::string input1Name = std::string("input1_");
+    std::string outputName = std::string("output0_");
+    std::string blockName;
+
+    // If a layer is present then the block will be used for execution, so input and output names need to be determined
+    // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
+    if(layer != nullptr)
+    {
+        // Get the layers connected to the input slots and determine unique tensor names.
+        Layer& connectedLayer0 = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
+        input0Name = GenerateUniqueName(connectedLayer0, 0);
+
+        Layer& connectedLayer1 = layer->GetInputSlot(1).GetConnectedOutputSlot()->GetOwningLayer();
+        input1Name = GenerateUniqueName(connectedLayer1, 1);
+
+        // Determine unique output tensor name.
+        outputName = GenerateUniqueOutputName(*layer, 0);
+    }
+
+    TosaSerializationOperator* op = nullptr;
+    switch(type)
+    {
+        case LayerType::Addition:
+        {
+            op = new TosaSerializationOperator(Op_ADD,
+                                               Attribute_NONE,
+                                               nullptr,
+                                               {input0Name, input1Name},
+                                               {outputName});
+            blockName = std::string("Op_ADD_block_") + GetUniqueTosaMappingID();
+            break;
+        }
+        case LayerType::Multiplication:
+        {
+            int32_t shift = 0;
+            TosaMulAttribute mulAttribute(shift);
+            op = new TosaSerializationOperator(Op_MUL,
+                                               Attribute_MulAttribute,
+                                               &mulAttribute,
+                                               {input0Name, input1Name},
+                                               {outputName});
+            blockName = std::string("Op_MUL_block_") + GetUniqueTosaMappingID();
+            break;
+        }
+        case LayerType::Subtraction:
+        {
+            op = new TosaSerializationOperator(Op_SUB,
+                                               Attribute_NONE,
+                                               nullptr,
+                                               {input0Name, input1Name},
+                                               {outputName});
+            blockName = std::string("Op_SUB_block_") + GetUniqueTosaMappingID();
+            break;
+        }
+        default:
+            throw armnn::Exception("ConvertElementwiseBinaryToTosaOperator: Unsupported layer type.");
+    }
+    ARMNN_ASSERT(op != nullptr);
+
+    std::vector<TosaSerializationTensor*> tensors;
+    // Only add input tensors if connected layer is an input layer.
+    // As intermediate or constant tensors will be created separately.
+    // There also can't be duplicate tensor.
+    if(input0Name.find("input0_") != std::string::npos)
+    {
+        std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
+        DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
+        tensors.push_back(new TosaSerializationTensor(input0Name, inputShape0, inputDType0, {}));
+    }
+    if(input1Name.find("input1_") != std::string::npos)
+    {
+        std::vector<int32_t> inputShape1 = GetTosaTensorShape(inputs[1]->GetShape());
+        DType inputDType1 = ArmNNToDType(inputs[1]->GetDataType());
+        tensors.push_back(new TosaSerializationTensor(input1Name, inputShape1, inputDType1, {}));
+    }
+
+    std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
+    DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
+
+    tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
+
+    // operatorInputNames/operatorOutputNames ends up being the same as
+    // blockInputNames/blockOutputNames for one-to-one ArmNN to Tosa mappings
+    return new TosaSerializationBasicBlock(blockName, // name
+                                           {op}, // operators
+                                           tensors, // tensors
+                                           {input0Name, input1Name}, // inputs
+                                           {outputName}); // outputs
+}
+
diff --git a/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.hpp b/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.hpp
new file mode 100644
index 0000000..86031c6
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TosaOperatorUtils.hpp"
+
+#include <Layer.hpp>
+
+#include <tosa_serialization_handler.h>
+
+using namespace armnn;
+using namespace tosa;
+
+TosaSerializationBasicBlock* ConvertElementwiseBinaryToTosaOperator(const Layer* layer,
+                                                                    const LayerType type,
+                                                                    const std::vector<const TensorInfo*>& inputs,
+                                                                    const std::vector<const TensorInfo*>& outputs);
diff --git a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
index 052c54c..7b117d8 100644
--- a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
+++ b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
@@ -5,12 +5,12 @@
 
 #pragma once
 
-#include "AdditionOperator.hpp"
+#include "AvgPool2DIgnoreValueOperator.hpp"
 #include "ConcatOperator.hpp"
 #include "ConstantOperator.hpp"
 #include "Conv2dOperator.hpp"
-#include "AvgPool2DIgnoreValueOperator.hpp"
+#include "ElementwiseBinaryOperator.hpp"
 #include "Pooling2DOperator.hpp"
 #include "ReshapeOperator.hpp"
 #include "SliceOperator.hpp"
-#include "TransposeConv2dOperator.hpp"
\ No newline at end of file
+#include "TransposeConv2dOperator.hpp"
diff --git a/src/backends/tosaCommon/test/OneToOneMappingTests.cpp b/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
index b3ab14a..146a9cb 100644
--- a/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
+++ b/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
@@ -253,6 +253,54 @@
         basicBlock, inputShape, outputShape, Op_CONV2D, Attribute_ConvAttribute, descriptor, LayerType::Convolution2d);
 }
 
+TEST_CASE("GetTosaMapping_MultiplicationLayer")
+{
+
+    const TensorInfo input0Info ({ 1, 2, 4, 2 }, DataType::Float32);
+    const TensorInfo input1Info ({ 1, 2, 4, 2 }, DataType::Float32);
+    const TensorInfo outputInfo ({ 1, 2, 4, 2 }, DataType::Float32);
+
+    std::vector<std::vector<int32_t>> inputShape  = {{ 1, 2, 4, 2 }, { 1, 2, 4, 2 }};
+    std::vector<std::vector<int32_t>> outputShape = {{ 1, 2, 4, 2 }};
+
+    TosaSerializationBasicBlock* basicBlock =
+        GetTosaMapping(nullptr, LayerType::Multiplication, {&input0Info, &input1Info}, {&outputInfo}, BaseDescriptor());
+    AssertTosaOneToOneMappingBasicBlock( basicBlock, inputShape, outputShape,
+        tosa::Op_MUL, tosa::Attribute_MulAttribute, BaseDescriptor(), LayerType::Multiplication);
+}
+
+TEST_CASE("GetTosaMappingFromLayer_MultiplicationLayer")
+{
+    IRuntime::CreationOptions options;
+    IRuntimePtr runtime(IRuntime::Create(options));
+
+    // Builds up the structure of the network.
+    INetworkPtr net(INetwork::Create());
+
+    IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
+    IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
+    IConnectableLayer* mul    = net->AddMultiplicationLayer("multiplication");
+    IConnectableLayer* output = net->AddOutputLayer(0, "output");
+
+    input0->GetOutputSlot(0).Connect(mul->GetInputSlot(0));
+    input1->GetOutputSlot(0).Connect(mul->GetInputSlot(1));
+    mul->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    TensorInfo info = TensorInfo({ 2, 2 }, DataType::Float32, 0.0f, 0, true);
+
+    input0->GetOutputSlot(0).SetTensorInfo(info);
+    input1->GetOutputSlot(0).SetTensorInfo(info);
+    mul->GetOutputSlot(0).SetTensorInfo(info);
+
+    std::vector<std::vector<int32_t>> inputShape  = {{ 2, 2 }, { 2, 2 }};
+    std::vector<std::vector<int32_t>> outputShape = {{ 2, 2 }};
+
+    TosaSerializationBasicBlock* basicBlock =
+            GetTosaMappingFromLayer(PolymorphicDowncast<Layer*>(mul));
+    AssertTosaOneToOneMappingBasicBlock( basicBlock, inputShape, outputShape,
+            tosa::Op_MUL, tosa::Attribute_MulAttribute, BaseDescriptor(), LayerType::Multiplication);
+}
+
 TEST_CASE("GetTosaMapping_AvgPool2DLayer")
 {
     Pooling2dDescriptor descriptor;
diff --git a/src/backends/tosaReference/TosaRefLayerSupport.cpp b/src/backends/tosaReference/TosaRefLayerSupport.cpp
index 0d0d07a..b37ecc4 100644
--- a/src/backends/tosaReference/TosaRefLayerSupport.cpp
+++ b/src/backends/tosaReference/TosaRefLayerSupport.cpp
@@ -38,6 +38,8 @@
         case LayerType::Output:
             return true;
         case LayerType::Addition:
+        case LayerType::Multiplication:
+        case LayerType::Subtraction:
             // Setup inputs and outputs
             inputInfos.push_back(&infos[0]);
             inputInfos.push_back(&infos[1]);
diff --git a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
index a377293..67b87ae 100644
--- a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
+++ b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
@@ -8,9 +8,11 @@
 #include "backendsCommon/test/AdditionEndToEndTestImpl.hpp"
 #include "backendsCommon/test/Convolution2dEndToEndTestImpl.hpp"
 #include "backendsCommon/test/ConcatEndToEndTestImpl.hpp"
+#include "backendsCommon/test/MultiplicationEndToEndTestImpl.hpp"
 #include "backendsCommon/test/Pooling2dEndToEndTestImpl.hpp"
 #include "backendsCommon/test/ReshapeEndToEndTestImpl.hpp"
 #include "backendsCommon/test/SliceEndToEndTestImpl.hpp"
+#include "backendsCommon/test/SubtractionEndToEndTestImpl.hpp"
 #include "backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp"
 
 #include <doctest/doctest.h>
@@ -150,6 +152,35 @@
 {
     SliceEndToEndFloat16<DataType::Float16>(tosaDefaultBackends);
 }
+TEST_CASE("TosaRefSubtractionEndToEndTestFloat32")
+{
+    SubtractionEndToEnd<DataType::Float32>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefSubtractionEndToEndTestInt32")
+{
+    SubtractionEndToEnd<DataType::Signed32>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefSubtractionEndToEndTestFloat16")
+{
+    SubtractionEndToEndFloat16<DataType::Float16>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefMultiplicationEndToEndTestFloat32")
+{
+    MultiplicationEndToEnd<DataType::Float32>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefMultiplicationEndToEndTestInt32")
+{
+    MultiplicationEndToEnd<DataType::Signed32>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefMultiplicationEndToEndTestFloat16")
+{
+    MultiplicationEndToEndFloat16<DataType::Float16>(tosaDefaultBackends);
+}
 
 // TransposeConvolution2d
 TEST_CASE("TosaRefTransposeConvolution2dEndToEndFloatNhwcTest")
diff --git a/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp b/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp
index 051965f..9119b13 100644
--- a/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp
+++ b/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp
@@ -190,6 +190,50 @@
     CHECK(!supported);
 }
 
+TEST_CASE("IsLayerSupportedTosaReferenceMultiplication")
+{
+    TensorShape shape0 = {1,1,3,4};
+    TensorShape shape1 = {1,1,3,4};
+    TensorShape outShape = {1,1,3,4};
+    TensorInfo in0(shape0, armnn::DataType::Float32);
+    TensorInfo in1(shape1, armnn::DataType::Float32);
+    TensorInfo out(outShape, armnn::DataType::Float32);
+
+    BaseDescriptor desc;
+    TosaRefLayerSupport supportChecker;
+    std::string reasonIfNotSupported;
+    auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Multiplication,
+                                                     {in0, in1, out},
+                                                     desc,
+                                                     armnn::EmptyOptional(),
+                                                     armnn::EmptyOptional(),
+                                                     reasonIfNotSupported);
+
+    CHECK(supported);
+}
+
+TEST_CASE("IsLayerSupportedTosaReferenceMultiplicationUnsupported")
+{
+    TensorShape shape0 = {1,1,3,4};
+    TensorShape shape1 = {1,2,3,4};
+    TensorShape outShape = {1,1,3,4};
+    TensorInfo in0(shape0, armnn::DataType::Signed64);
+    TensorInfo in1(shape1, armnn::DataType::Signed64);
+    TensorInfo out(outShape, armnn::DataType::Signed64);
+
+    BaseDescriptor desc;
+    TosaRefLayerSupport supportChecker;
+    std::string reasonIfNotSupported;
+    auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Multiplication,
+                                                     {in0, in1, out},
+                                                     desc,
+                                                     armnn::EmptyOptional(),
+                                                     armnn::EmptyOptional(),
+                                                     reasonIfNotSupported);
+
+    CHECK(!supported);
+}
+
 TEST_CASE("IsLayerSupportedTosaReferenceMaxPooling2d")
 {
     TensorShape inShape = {1,1,3,4};
@@ -376,6 +420,50 @@
     CHECK(!supported);
 }
 
+TEST_CASE("IsLayerSupportedTosaReferenceSubtraction")
+{
+    TensorShape shape0 = {1,1,3,4};
+    TensorShape shape1 = {1,1,3,4};
+    TensorShape outShape = {1,1,3,4};
+    TensorInfo in0(shape0, armnn::DataType::Float32);
+    TensorInfo in1(shape1, armnn::DataType::Float32);
+    TensorInfo out(outShape, armnn::DataType::Float32);
+
+    BaseDescriptor desc;
+    TosaRefLayerSupport supportChecker;
+    std::string reasonIfNotSupported;
+    auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Subtraction,
+                                                     {in0, in1, out},
+                                                     desc,
+                                                     armnn::EmptyOptional(),
+                                                     armnn::EmptyOptional(),
+                                                     reasonIfNotSupported);
+
+    CHECK(supported);
+}
+
+TEST_CASE("IsLayerSupportedTosaReferenceSubtractionUnsupported")
+{
+    TensorShape shape0 = {1,1,3,4};
+    TensorShape shape1 = {4};
+    TensorShape outShape = {1,1,3,4};
+    TensorInfo in0(shape0, armnn::DataType::Signed64);
+    TensorInfo in1(shape1, armnn::DataType::Signed64);
+    TensorInfo out(outShape, armnn::DataType::Signed64);
+
+    BaseDescriptor desc;
+    TosaRefLayerSupport supportChecker;
+    std::string reasonIfNotSupported;
+    auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Subtraction,
+                                                     {in0, in1, out},
+                                                     desc,
+                                                     armnn::EmptyOptional(),
+                                                     armnn::EmptyOptional(),
+                                                     reasonIfNotSupported);
+
+    CHECK(!supported);
+}
+
 TEST_CASE("IsLayerSupportedTosaReferenceTransposeConv2d")
 {
     TensorInfo inputInfo ({ 1, 3, 3, 1 }, DataType::Float32);