IVGCVSW-1843: Replace trivial arithmetic helpers with a templated ArithmeticFunction
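
The four per-operation helpers (Addition, Subtraction, Multiplication
and Division) duplicated the same broadcast-and-apply loop, so they are
folded into a single templated ArithmeticFunction<Functor> whose
constructor runs a BroadcastLoop with the given functor. The template
is explicitly instantiated for std::plus, std::minus, std::multiplies
and std::divides of float. The dedicated same-shape fast paths are
dropped (BroadcastLoop now handles the equal-shape case too), and the
hand-written divide-by-zero handling is replaced by plain
std::divides<float>, i.e. IEEE float division semantics.

Minimal usage sketch (the AddTensors wrapper below is illustrative
only and is not part of this patch):

    #include "ArithmeticFunction.hpp"
    #include <functional>

    // Broadcast-aware float addition of two tensors.
    void AddTensors(const armnn::TensorShape& shape0,
                    const armnn::TensorShape& shape1,
                    const armnn::TensorShape& outShape,
                    const float* in0, const float* in1, float* out)
    {
        // Constructing the temporary performs the element-wise loop.
        armnn::ArithmeticFunction<std::plus<float>>(
            shape0, shape1, outShape, in0, in1, out);
    }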

Change-Id: Iddf637694f1a3a7ef00f006a41b8044a35c7e73c
diff --git a/Android.mk b/Android.mk
index 9c23736..9c4db74 100644
--- a/Android.mk
+++ b/Android.mk
@@ -128,16 +128,14 @@
         src/armnn/backends/RefWorkloads/RefSoftmaxFloat32Workload.cpp \
         src/armnn/backends/RefWorkloads/RefActivationFloat32Workload.cpp \
         src/armnn/backends/RefWorkloads/RefBatchNormalizationUint8Workload.cpp \
-        src/armnn/backends/RefWorkloads/Multiplication.cpp \
         src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.cpp \
         src/armnn/backends/RefWorkloads/RefBaseConstantWorkload.cpp \
         src/armnn/backends/RefWorkloads/RefResizeBilinearFloat32Workload.cpp \
         src/armnn/backends/RefWorkloads/RefBatchNormalizationFloat32Workload.cpp \
         src/armnn/backends/RefWorkloads/Broadcast.cpp \
-        src/armnn/backends/RefWorkloads/Addition.cpp \
+        src/armnn/backends/RefWorkloads/ArithmeticFunction.cpp \
         src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.cpp \
         src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.cpp \
-        src/armnn/backends/RefWorkloads/Subtraction.cpp \
         src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.cpp \
         src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.cpp \
         src/armnn/backends/RefWorkloads/RefFakeQuantizationFloat32Workload.cpp \
@@ -170,7 +168,6 @@
         src/armnn/backends/RefWorkloads/RefPermuteWorkload.cpp \
         src/armnn/backends/RefWorkloads/RefConvertFp16ToFp32Workload.cpp \
         src/armnn/backends/RefWorkloads/RefConvertFp32ToFp16Workload.cpp \
-        src/armnn/backends/RefWorkloads/Division.cpp \
         src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.cpp \
         src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.cpp \
         src/armnn/backends/MemCopyWorkload.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 777c315..9c2685c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -186,14 +186,12 @@
     src/armnn/backends/RefWorkloads/Broadcast.cpp
     src/armnn/backends/RefWorkloads/RefMergerUint8Workload.cpp
     src/armnn/backends/RefWorkloads/RefConstantUint8Workload.hpp
-    src/armnn/backends/RefWorkloads/Addition.cpp
-    src/armnn/backends/RefWorkloads/Addition.hpp
+    src/armnn/backends/RefWorkloads/ArithmeticFunction.cpp
+    src/armnn/backends/RefWorkloads/ArithmeticFunction.hpp
     src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.cpp
     src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.hpp
     src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.cpp
     src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.hpp
-    src/armnn/backends/RefWorkloads/Subtraction.cpp
-    src/armnn/backends/RefWorkloads/Subtraction.hpp
     src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.cpp
     src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.hpp
     src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.cpp
@@ -210,12 +208,8 @@
     src/armnn/backends/RefWorkloads/RefActivationFloat32Workload.cpp
     src/armnn/backends/RefWorkloads/RefBatchNormalizationUint8Workload.cpp
     src/armnn/backends/RefWorkloads/RefResizeBilinearUint8Workload.hpp
-    src/armnn/backends/RefWorkloads/Multiplication.cpp
-    src/armnn/backends/RefWorkloads/Division.cpp
-    src/armnn/backends/RefWorkloads/Division.hpp
     src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.cpp
     src/armnn/backends/RefWorkloads/RefL2NormalizationFloat32Workload.hpp
-    src/armnn/backends/RefWorkloads/Multiplication.hpp
     src/armnn/backends/RefWorkloads/RefActivationUint8Workload.hpp
     src/armnn/backends/RefWorkloads/RefBaseConstantWorkload.cpp
     src/armnn/backends/RefWorkloads/RefResizeBilinearFloat32Workload.cpp
diff --git a/src/armnn/backends/RefWorkloads.hpp b/src/armnn/backends/RefWorkloads.hpp
index 910610c..e58d4ac 100644
--- a/src/armnn/backends/RefWorkloads.hpp
+++ b/src/armnn/backends/RefWorkloads.hpp
@@ -6,7 +6,7 @@
 #pragma once
 
 #include "backends/RefWorkloads/RefConstantUint8Workload.hpp"
-#include "backends/RefWorkloads/Addition.hpp"
+#include "backends/RefWorkloads/ArithmeticFunction.hpp"
 #include "backends/RefWorkloads/ConvImpl.hpp"
 #include "backends/RefWorkloads/RefMultiplicationUint8Workload.hpp"
 #include "backends/RefWorkloads/RefBaseConstantWorkload.hpp"
@@ -14,7 +14,6 @@
 #include "backends/RefWorkloads/RefSplitterUint8Workload.hpp"
 #include "backends/RefWorkloads/RefResizeBilinearUint8Workload.hpp"
 #include "backends/RefWorkloads/RefL2NormalizationFloat32Workload.hpp"
-#include "backends/RefWorkloads/Multiplication.hpp"
 #include "backends/RefWorkloads/RefActivationUint8Workload.hpp"
 #include "backends/RefWorkloads/RefPooling2dFloat32Workload.hpp"
 #include "backends/RefWorkloads/RefWorkloadUtils.hpp"
diff --git a/src/armnn/backends/RefWorkloads/Addition.cpp b/src/armnn/backends/RefWorkloads/Addition.cpp
deleted file mode 100644
index 33d5bd5..0000000
--- a/src/armnn/backends/RefWorkloads/Addition.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "Addition.hpp"
-#include "Broadcast.hpp"
-
-#include <functional>
-
-namespace
-{
-
-void ElementwiseAddition(unsigned int numElements, const float* inData0, const float* inData1, float* outData)
-{
-    for (unsigned int i = 0; i < numElements; ++i)
-    {
-        outData[i] = inData0[i] + inData1[i];
-    }
-}
-
-} // namespace
-
-namespace armnn
-{
-
-void Addition(const TensorShape& inShape0,
-              const TensorShape& inShape1,
-              const TensorShape& outShape,
-              const float* inData0,
-              const float* inData1,
-              float* outData)
-{
-    if (inShape0 == inShape1)
-    {
-        ElementwiseAddition(inShape0.GetNumElements(), inData0, inData1, outData);
-    }
-    else
-    {
-        BroadcastLoop(inShape0, inShape1, outShape).Unroll(std::plus<float>(), 0, inData0, inData1, outData);
-    }
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/Addition.hpp b/src/armnn/backends/RefWorkloads/Addition.hpp
deleted file mode 100644
index dcbd499..0000000
--- a/src/armnn/backends/RefWorkloads/Addition.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/Tensor.hpp>
-
-namespace armnn
-{
-
-void Addition(const TensorShape& inShape0,
-              const TensorShape& inShape1,
-              const TensorShape& outShape,
-              const float* inData0,
-              const float* inData1,
-              float* outData);
-
-} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/ArithmeticFunction.cpp b/src/armnn/backends/RefWorkloads/ArithmeticFunction.cpp
new file mode 100644
index 0000000..fede138
--- /dev/null
+++ b/src/armnn/backends/RefWorkloads/ArithmeticFunction.cpp
@@ -0,0 +1,29 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ArithmeticFunction.hpp"
+#include "Broadcast.hpp"
+#include <functional>
+
+namespace armnn
+{
+
+template <typename Functor>
+ArithmeticFunction<Functor>::ArithmeticFunction(const TensorShape& inShape0,
+                                                const TensorShape& inShape1,
+                                                const TensorShape& outShape,
+                                                const float* inData0,
+                                                const float* inData1,
+                                                float* outData)
+{
+    BroadcastLoop(inShape0, inShape1, outShape).Unroll(Functor(), 0, inData0, inData1, outData);
+}
+
+} //namespace armnn
+
+template struct armnn::ArithmeticFunction<std::plus<float>>;
+template struct armnn::ArithmeticFunction<std::minus<float>>;
+template struct armnn::ArithmeticFunction<std::multiplies<float>>;
+template struct armnn::ArithmeticFunction<std::divides<float>>;
diff --git a/src/armnn/backends/RefWorkloads/ArithmeticFunction.hpp b/src/armnn/backends/RefWorkloads/ArithmeticFunction.hpp
new file mode 100644
index 0000000..eafb644
--- /dev/null
+++ b/src/armnn/backends/RefWorkloads/ArithmeticFunction.hpp
@@ -0,0 +1,24 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Tensor.hpp>
+
+namespace armnn
+{
+
+template <typename Functor>
+struct ArithmeticFunction
+{
+    ArithmeticFunction(const TensorShape& inShape0,
+                       const TensorShape& inShape1,
+                       const TensorShape& outShape,
+                       const float* inData0,
+                       const float* inData1,
+                       float* outData);
+};
+
+} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/Division.cpp b/src/armnn/backends/RefWorkloads/Division.cpp
deleted file mode 100644
index cc7f7c9..0000000
--- a/src/armnn/backends/RefWorkloads/Division.cpp
+++ /dev/null
@@ -1,89 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "Division.hpp"
-#include "Broadcast.hpp"
-
-#include <functional>
-
-#include <cmath>
-
-namespace
-{
-
-void ElementwiseDivision(unsigned int numElements,
-                         const float* inData0,
-                         const float* inData1,
-                         float* outData)
-{
-    for (unsigned int i = 0; i < numElements; ++i)
-    {
-        if (inData1[i] != 0.0f)
-        {
-            outData[i] = inData0[i] / inData1[i];
-        }
-        else if (inData0[i] == 0.0f)
-        {
-            if (!std::signbit(inData1[i]))
-            {
-                outData[i]= NAN;
-            }
-            else
-            {
-                outData[i]= -NAN;
-            }
-        }
-        else if (inData0[i] < 0.0f)
-        {
-            if (!std::signbit(inData1[i]))
-            {
-                outData[i] = -INFINITY;
-            }
-            else
-            {
-                outData[i] = INFINITY;
-            }
-        }
-        else
-        {
-            if (!std::signbit(inData1[i]))
-            {
-                outData[i] = INFINITY;
-            }
-            else
-            {
-                outData[i] = -INFINITY;
-            }
-        }
-    }
-}
-
-} // namespace
-
-namespace armnn
-{
-
-void Division(const TensorShape& inShape0,
-              const TensorShape& inShape1,
-              const TensorShape& outShape,
-              const float* inData0,
-              const float* inData1,
-              float* outData)
-{
-    if (inShape0 == inShape1)
-    {
-        ElementwiseDivision(inShape0.GetNumElements(), inData0, inData1, outData);
-    }
-    else
-    {
-        BroadcastLoop(inShape0, inShape1, outShape).Unroll(std::divides<float>(),
-                                                           0,
-                                                           inData0,
-                                                           inData1,
-                                                           outData);
-    }
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/Division.hpp b/src/armnn/backends/RefWorkloads/Division.hpp
deleted file mode 100644
index b83c77f..0000000
--- a/src/armnn/backends/RefWorkloads/Division.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/Tensor.hpp>
-
-namespace armnn
-{
-
-    void Division(const TensorShape& inShape0,
-                  const TensorShape& inShape1,
-                  const TensorShape& outShape,
-                  const float* inData0,
-                  const float* inData1,
-                  float* outData);
-
-} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/Multiplication.cpp b/src/armnn/backends/RefWorkloads/Multiplication.cpp
deleted file mode 100644
index ae6446a..0000000
--- a/src/armnn/backends/RefWorkloads/Multiplication.cpp
+++ /dev/null
@@ -1,52 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "Multiplication.hpp"
-#include "Broadcast.hpp"
-
-#include <functional>
-
-namespace
-{
-
-void ElementwiseMultiplication(unsigned int numElements,
-                               const float* inData0,
-                               const float* inData1,
-                               float* outData)
-{
-    for (unsigned int i = 0; i < numElements; ++i)
-    {
-        outData[i] = inData0[i] * inData1[i];
-    }
-}
-
-} // namespace
-
-namespace armnn
-{
-
-void Multiplication(const TensorShape& inShape0,
-                    const TensorShape& inShape1,
-                    const TensorShape& outShape,
-                    const float* inData0,
-                    const float* inData1,
-                    float* outData)
-{
-    if (inShape0 == inShape1)
-    {
-        ElementwiseMultiplication(inShape0.GetNumElements(), inData0, inData1, outData);
-    }
-    else
-    {
-        BroadcastLoop(inShape0, inShape1, outShape).Unroll(
-            std::multiplies<float>(),
-            0,
-            inData0,
-            inData1,
-            outData);
-    }
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/Multiplication.hpp b/src/armnn/backends/RefWorkloads/Multiplication.hpp
deleted file mode 100644
index 58ad7b4..0000000
--- a/src/armnn/backends/RefWorkloads/Multiplication.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/Tensor.hpp>
-
-namespace armnn
-{
-
-void Multiplication(const TensorShape& inShape0,
-                    const TensorShape& inShape1,
-                    const TensorShape& outShape,
-                    const float* inData0,
-                    const float* inData1,
-                    float* outData);
-
-} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.cpp
index c2a5b5f..21c7533 100644
--- a/src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.cpp
+++ b/src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.cpp
@@ -5,7 +5,7 @@
 
 #include "RefAdditionFloat32Workload.hpp"
 
-#include "Addition.hpp"
+#include "ArithmeticFunction.hpp"
 #include "RefWorkloadUtils.hpp"
 
 #include "Profiling.hpp"
@@ -25,7 +25,7 @@
     const float* inData1 = GetInputTensorDataFloat(1, m_Data);
     float* outData = GetOutputTensorDataFloat(0, m_Data);
 
-    Addition(inShape0, inShape1, outShape, inData0, inData1, outData);
+    ArithmeticFunction<std::plus<float>>(inShape0, inShape1, outShape, inData0, inData1, outData);
 }
 
 } //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.cpp
index 2999be9..116a5f1 100644
--- a/src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.cpp
+++ b/src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.cpp
@@ -5,7 +5,7 @@
 
 #include "RefAdditionUint8Workload.hpp"
 
-#include "Addition.hpp"
+#include "ArithmeticFunction.hpp"
 #include "RefWorkloadUtils.hpp"
 
 #include "Profiling.hpp"
@@ -28,12 +28,12 @@
 
     std::vector<float> results(outputInfo.GetNumElements());
 
-    Addition(inputInfo0.GetShape(),
-             inputInfo1.GetShape(),
-             outputInfo.GetShape(),
-             dequant0.data(),
-             dequant1.data(),
-             results.data());
+    ArithmeticFunction<std::plus<float>>(inputInfo0.GetShape(),
+                                         inputInfo1.GetShape(),
+                                         outputInfo.GetShape(),
+                                         dequant0.data(),
+                                         dequant1.data(),
+                                         results.data());
 
     Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo);
 }
diff --git a/src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.cpp
index 81f4645..28c9061 100644
--- a/src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.cpp
+++ b/src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.cpp
@@ -5,7 +5,7 @@
 
 #include "RefDivisionFloat32Workload.hpp"
 
-#include "Division.hpp"
+#include "ArithmeticFunction.hpp"
 #include "RefWorkloadUtils.hpp"
 
 #include "Profiling.hpp"
@@ -25,7 +25,7 @@
     const float* inputData0 = GetInputTensorDataFloat(0, m_Data);
     const float* inputData1 = GetInputTensorDataFloat(1, m_Data);
 
-    Division(inShape0, inShape1, outShape, inputData0, inputData1, outputData);
+    ArithmeticFunction<std::divides<float>>(inShape0, inShape1, outShape, inputData0, inputData1, outputData);
 }
 
 } //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.cpp
index a6ed770..d10d874 100644
--- a/src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.cpp
+++ b/src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.cpp
@@ -5,7 +5,7 @@
 
 #include "RefDivisionUint8Workload.hpp"
 
-#include "Division.hpp"
+#include "ArithmeticFunction.hpp"
 #include "RefWorkloadUtils.hpp"
 
 #include "Profiling.hpp"
@@ -27,9 +27,13 @@
     auto dequant1 = Dequantize(GetInputTensorDataU8(1, m_Data), inputInfo1);
 
     std::vector<float> results(outputInfo.GetNumElements());
-    Division(
-        inputInfo0.GetShape(), inputInfo1.GetShape(), outputInfo.GetShape(),
-        dequant0.data(), dequant1.data(),results.data());
+
+    ArithmeticFunction<std::divides<float>>(inputInfo0.GetShape(),
+                                            inputInfo1.GetShape(),
+                                            outputInfo.GetShape(),
+                                            dequant0.data(),
+                                            dequant1.data(),
+                                            results.data());
 
     Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo);
 }
diff --git a/src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.cpp
index 022cca7..0b36f0f 100644
--- a/src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.cpp
+++ b/src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.cpp
@@ -5,7 +5,7 @@
 
 #include "RefMultiplicationFloat32Workload.hpp"
 
-#include "Multiplication.hpp"
+#include "ArithmeticFunction.hpp"
 #include "RefWorkloadUtils.hpp"
 
 #include "Profiling.hpp"
@@ -25,7 +25,7 @@
     const float* inputData0 = GetInputTensorDataFloat(0, m_Data);
     const float* inputData1 = GetInputTensorDataFloat(1, m_Data);
 
-    Multiplication(inShape0, inShape1, outShape, inputData0, inputData1, outputData);
+    ArithmeticFunction<std::multiplies<float>>(inShape0, inShape1, outShape, inputData0, inputData1, outputData);
 }
 
 } //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.cpp
index 8e0a617..b929a53 100644
--- a/src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.cpp
+++ b/src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.cpp
@@ -5,7 +5,7 @@
 
 #include "RefMultiplicationUint8Workload.hpp"
 
-#include "Multiplication.hpp"
+#include "ArithmeticFunction.hpp"
 #include "RefWorkloadUtils.hpp"
 
 #include "Profiling.hpp"
@@ -27,9 +27,13 @@
     auto dequant1 = Dequantize(GetInputTensorDataU8(1, m_Data), inputInfo1);
 
     std::vector<float> results(outputInfo.GetNumElements());
-    Multiplication(
-        inputInfo0.GetShape(), inputInfo1.GetShape(), outputInfo.GetShape(),
-        dequant0.data(), dequant1.data(),results.data());
+
+    ArithmeticFunction<std::multiplies<float>>(inputInfo0.GetShape(),
+                                               inputInfo1.GetShape(),
+                                               outputInfo.GetShape(),
+                                               dequant0.data(),
+                                               dequant1.data(),
+                                               results.data());
 
    Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo);
 }
diff --git a/src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.cpp
index 4440eed..f1840c3 100644
--- a/src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.cpp
+++ b/src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.cpp
@@ -5,7 +5,7 @@
 
 #include "RefSubtractionFloat32Workload.hpp"
 
-#include "Subtraction.hpp"
+#include "ArithmeticFunction.hpp"
 #include "RefWorkloadUtils.hpp"
 
 #include "Profiling.hpp"
@@ -25,7 +25,7 @@
     const float* inData1 = GetInputTensorDataFloat(1, m_Data);
     float* outData = GetOutputTensorDataFloat(0, m_Data);
 
-    Subtraction(inShape0, inShape1, outShape, inData0, inData1, outData);
+    ArithmeticFunction<std::minus<float>>(inShape0, inShape1, outShape, inData0, inData1, outData);
 }
 
 } //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.cpp
index 8066762..1affbdd 100644
--- a/src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.cpp
+++ b/src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.cpp
@@ -5,7 +5,7 @@
 
 #include "RefSubtractionUint8Workload.hpp"
 
-#include "Subtraction.hpp"
+#include "ArithmeticFunction.hpp"
 #include "RefWorkloadUtils.hpp"
 
 #include "Profiling.hpp"
@@ -28,12 +28,12 @@
 
     std::vector<float> results(outputInfo.GetNumElements());
 
-    Subtraction(inputInfo0.GetShape(),
-                inputInfo1.GetShape(),
-                outputInfo.GetShape(),
-                dequant0.data(),
-                dequant1.data(),
-                results.data());
+    ArithmeticFunction<std::minus<float>>(inputInfo0.GetShape(),
+                                          inputInfo1.GetShape(),
+                                          outputInfo.GetShape(),
+                                          dequant0.data(),
+                                          dequant1.data(),
+                                          results.data());
 
     Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo);
 }
diff --git a/src/armnn/backends/RefWorkloads/Subtraction.cpp b/src/armnn/backends/RefWorkloads/Subtraction.cpp
deleted file mode 100644
index f25c8ad..0000000
--- a/src/armnn/backends/RefWorkloads/Subtraction.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "Subtraction.hpp"
-#include "Broadcast.hpp"
-
-#include <functional>
-
-namespace
-{
-
-void ElementwiseSubtraction(unsigned int numElements, const float* inData0, const float* inData1, float* outData)
-{
-    for (unsigned int i = 0; i < numElements; ++i)
-    {
-        outData[i] = inData0[i] - inData1[i];
-    }
-}
-
-} // namespace
-
-namespace armnn
-{
-
-void Subtraction(const TensorShape& inShape0,
-                 const TensorShape& inShape1,
-                 const TensorShape& outShape,
-                 const float* inData0,
-                 const float* inData1,
-                 float* outData)
-{
-    if (inShape0 == inShape1)
-    {
-        ElementwiseSubtraction(inShape0.GetNumElements(), inData0, inData1, outData);
-    }
-    else
-    {
-        BroadcastLoop(inShape0, inShape1, outShape).Unroll(std::minus<float>(), 0, inData0, inData1, outData);
-    }
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/Subtraction.hpp b/src/armnn/backends/RefWorkloads/Subtraction.hpp
deleted file mode 100644
index 3956797..0000000
--- a/src/armnn/backends/RefWorkloads/Subtraction.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/Tensor.hpp>
-
-namespace armnn
-{
-
-void Subtraction(const TensorShape& inShape0,
-                 const TensorShape& inShape1,
-                 const TensorShape& outShape,
-                 const float* inData0,
-                 const float* inData1,
-                 float* outData);
-
-} //namespace armnn