IVGCVSW-3740 Add Reference Workload support for ABS

 * Implemented RefAbsWorkload and unit tests
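 * Illustrative usage (not part of this patch): a minimal end-to-end sketch
   of how the new CpuRef Abs workload gets exercised, assuming the AddAbsLayer
   front-end API from the preceding front-end change and the existing ArmNN
   runtime API. The layer name "abs" and the 2x2 Float32 shape are arbitrary.

       #include <armnn/ArmNN.hpp>

       #include <utility>
       #include <vector>

       void RunAbsOnCpuRef()
       {
           using namespace armnn;

           // Build input -> Abs -> output.
           INetworkPtr net = INetwork::Create();
           IConnectableLayer* input  = net->AddInputLayer(0);
           IConnectableLayer* abs    = net->AddAbsLayer("abs"); // assumed front-end API
           IConnectableLayer* output = net->AddOutputLayer(0);

           input->GetOutputSlot(0).Connect(abs->GetInputSlot(0));
           abs->GetOutputSlot(0).Connect(output->GetInputSlot(0));

           TensorInfo info({ 2, 2 }, DataType::Float32);
           input->GetOutputSlot(0).SetTensorInfo(info);
           abs->GetOutputSlot(0).SetTensorInfo(info);

           // Optimise for the reference backend and load the network.
           IRuntime::CreationOptions options;
           IRuntimePtr runtime = IRuntime::Create(options);
           std::vector<BackendId> backends = { Compute::CpuRef };
           IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());

           NetworkId netId;
           runtime->LoadNetwork(netId, std::move(optNet));

           std::vector<float> inData  = { -0.1f, 0.2f, 0.3f, -0.4f };
           std::vector<float> outData(4);

           InputTensors  inputs  = { { 0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), inData.data()) } };
           OutputTensors outputs = { { 0, Tensor(runtime->GetOutputTensorInfo(netId, 0), outData.data()) } };

           // Dispatches to RefAbsWorkload::Execute(); outData becomes { 0.1f, 0.2f, 0.3f, 0.4f }.
           runtime->EnqueueWorkload(netId, inputs, outputs);
       }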

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Ibfcdb2b37fd8d240c181f96856e2c997a4b88914
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 88eae35..f75870a 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -33,6 +33,7 @@
     test/CommonTestUtils.cpp \
     test/JsonPrinterTestImpl.cpp \
     test/TensorCopyUtils.cpp \
+    test/layerTests/AbsTestImpl.cpp \
     test/layerTests/ActivationTestImpl.cpp \
     test/layerTests/AdditionTestImpl.cpp \
     test/layerTests/BatchNormalizationTestImpl.cpp \
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index c0760cb..6b71522 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -38,6 +38,8 @@
     TensorCopyUtils.hpp
     WorkloadFactoryHelper.hpp
     WorkloadTestUtils.hpp
+    layerTests/AbsTestImpl.cpp
+    layerTests/AbsTestImpl.hpp
     layerTests/ActivationTestImpl.cpp
     layerTests/ActivationTestImpl.hpp
     layerTests/AdditionTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index a4c09a6..bf2ef6a 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -5,6 +5,7 @@
 
 #pragma once
 
+#include <backendsCommon/test/layerTests/AbsTestImpl.hpp>
 #include <backendsCommon/test/layerTests/ActivationTestImpl.hpp>
 #include <backendsCommon/test/layerTests/AdditionTestImpl.hpp>
 #include <backendsCommon/test/layerTests/BatchNormalizationTestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp
new file mode 100644
index 0000000..60ac54b
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp
@@ -0,0 +1,241 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "AbsTestImpl.hpp"
+
+#include <armnn/ArmNN.hpp>
+
+#include <backendsCommon/test/DataTypeUtils.hpp>
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+namespace
+{
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> Abs2dTestCommon(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::TensorInfo& inputTensorInfo,
+    const armnn::TensorInfo& outputTensorInfo,
+    const std::vector<float>& inputValues,
+    const std::vector<float>& expectedOutputValues)
+{
+    auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputValues,inputTensorInfo));
+
+    LayerTestResult<T, 2> result(outputTensorInfo);
+
+    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
+                                             ConvertToDataType<ArmnnType>(expectedOutputValues,outputTensorInfo));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::AbsQueueDescriptor descriptor;
+
+    armnn::WorkloadInfo info;
+
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAbs(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
+
+    return result;
+}
+
+} // anonymous namespace
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> Abs2dTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const armnn::TensorShape inputShape{ 2, 2 };
+    const armnn::TensorShape outputShape{ 2, 2 };
+
+    float qScale    = 0.0625f;
+    int32_t qOffset = 64;
+
+    if (ArmnnType == armnn::DataType::QuantisedSymm16)
+    {
+        qScale  = 0.1f;
+        qOffset = 0;
+    }
+
+    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+    inputTensorInfo.SetQuantizationScale(qScale);
+    inputTensorInfo.SetQuantizationOffset(qOffset);
+
+    armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
+    outputTensorInfo.SetQuantizationScale(qScale);
+    outputTensorInfo.SetQuantizationOffset(qOffset);
+
+    std::vector<float> inputValues
+    {
+        -0.1f, 0.2f,
+        0.3f, -0.4f
+    };
+
+    // Calculate output values for input.
+    auto f = [](float value)
+    {
+        return std::abs(value);
+    };
+    std::vector<float> expectedOutputValues(inputValues.size());
+    std::transform(inputValues.begin(), inputValues.end(), expectedOutputValues.begin(), f);
+
+    return Abs2dTestCommon<ArmnnType>(workloadFactory, memoryManager,
+                                inputTensorInfo, outputTensorInfo,
+                                inputValues, expectedOutputValues);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 3> Abs3dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const armnn::TensorShape inputShape{ 3, 1, 2 };
+    const armnn::TensorShape outputShape{ 3, 1, 2 };
+
+    float qScale    = 0.0625f;
+    int32_t qOffset = 64;
+
+    if (ArmnnType == armnn::DataType::QuantisedSymm16)
+    {
+        qScale  = 0.1f;
+        qOffset = 0;
+    }
+
+    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+    inputTensorInfo.SetQuantizationScale(qScale);
+    inputTensorInfo.SetQuantizationOffset(qOffset);
+
+    armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
+    outputTensorInfo.SetQuantizationScale(qScale);
+    outputTensorInfo.SetQuantizationOffset(qOffset);
+
+    std::vector<float> inputValues
+    {
+        -0.1f, -0.2f, -0.3f,
+        0.1f,  0.2f,  0.3f
+    };
+
+    auto f = [](float value)
+    {
+        return std::abs(value);
+    };
+    std::vector<float> expectedOutputValues(inputValues.size());
+    std::transform(inputValues.begin(), inputValues.end(), expectedOutputValues.begin(), f);
+
+    auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputValues,inputTensorInfo));
+
+    LayerTestResult<T, 3> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo,
+                                             ConvertToDataType<ArmnnType>(expectedOutputValues,outputTensorInfo));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::AbsQueueDescriptor descriptor;
+
+    armnn::WorkloadInfo info;
+
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAbs(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
+
+    return result;
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> AbsZeroTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const armnn::TensorShape inputShape{ 1, 2 };
+    const armnn::TensorShape outputShape{ 1, 2 };
+
+    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+    inputTensorInfo.SetQuantizationScale(0.1f);
+
+    armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
+    outputTensorInfo.SetQuantizationScale(0.1f);
+
+    std::vector<float> inputValues
+    {
+        0.f, -0.f
+    };
+
+    std::vector<float> expectedOutputValues
+    {
+        0.f, 0.f
+    };
+
+    return Abs2dTestCommon<ArmnnType>(workloadFactory, memoryManager,
+                                inputTensorInfo, outputTensorInfo,
+                                inputValues, expectedOutputValues);
+}
+
+//
+// Explicit template instantiations
+//
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+Abs2dTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 2>
+Abs2dTest<armnn::DataType::QuantisedAsymm8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
+Abs2dTest<armnn::DataType::QuantisedSymm16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
+Abs3dTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 3>
+Abs3dTest<armnn::DataType::QuantisedAsymm8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
+Abs3dTest<armnn::DataType::QuantisedSymm16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+AbsZeroTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/AbsTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.hpp
new file mode 100644
index 0000000..88bf1ea
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.hpp
@@ -0,0 +1,28 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <ResolveType.hpp>
+
+#include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> Abs2dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> Abs3dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> AbsZeroTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 22d7914..5c53f12 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -66,6 +66,32 @@
 
 } // anonymous namespace
 
+bool RefLayerSupport::IsAbsSupported(const TensorInfo& input, const TensorInfo& output,
+                                     Optional<std::string&> reasonIfUnsupported) const
+{
+    bool supported = true;
+    std::array<DataType,3> supportedTypes =
+        {
+            DataType::Float32,
+            DataType::QuantisedAsymm8,
+            DataType::QuantisedSymm16
+        };
+
+    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+                                  "Reference abs: input type not supported");
+
+    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+                                  "Reference abs: output type not supported");
+
+    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+                                  "Reference abs: input and output types not matching");
+
+    supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
+                                  "Reference abs: input and output shapes have different number of total elements");
+
+    return supported;
+}
+
 bool RefLayerSupport::IsActivationSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const ActivationDescriptor& descriptor,
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index f8bbeb7..26c60dc 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -12,6 +12,10 @@
 class RefLayerSupport : public LayerSupportBase
 {
 public:
+    bool IsAbsSupported(const TensorInfo& input,
+                        const TensorInfo& output,
+                        Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsActivationSupported(const TensorInfo& input,
                                const TensorInfo& output,
                                const ActivationDescriptor& descriptor,
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index fff2fd2..dc97356 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -535,4 +535,14 @@
     return std::make_unique<RefStackWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
+                                                         const WorkloadInfo& info) const
+{
+    if (IsFloat16(info)) // Float16 is not supported here; MakeWorkload returns a null workload
+    {
+        return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+    }
+    return std::make_unique<RefAbsWorkload>(descriptor, info);
+}
+
 } // namespace armnn
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 314e117..5851528 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -209,6 +209,9 @@
     std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
+                                         const WorkloadInfo& info) const override;
+
 private:
 
     template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index 4bfacd5..b1f0a03 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -21,6 +21,7 @@
         RefWorkloadFactory.cpp \
         RefRegistryInitializer.cpp \
         RefTensorHandleFactory.cpp \
+        workloads/Abs.cpp \
         workloads/Activation.cpp \
         workloads/BatchNormImpl.cpp \
         workloads/BatchToSpaceNd.cpp \
@@ -37,6 +38,7 @@
         workloads/Pad.cpp \
         workloads/Pooling2d.cpp \
         workloads/PreluImpl.cpp \
+        workloads/RefAbsWorkload.cpp \
         workloads/RefActivationWorkload.cpp \
         workloads/RefBatchNormalizationWorkload.cpp \
         workloads/RefBatchToSpaceNdWorkload.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 59ca516..af9f645 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1110,6 +1110,15 @@
 ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsMultiDimIndicesUint8, GatherMultiDimParamsMultiDimIndicesUint8Test)
 ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsMultiDimIndicesInt16, GatherMultiDimParamsMultiDimIndicesInt16Test)
 
+// Abs
+ARMNN_AUTO_TEST_CASE(Abs2d, Abs2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Abs3d, Abs3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(AbsZero, AbsZeroTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Abs2dQuantisedAsymm8, Abs2dTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(Abs3dQuantisedAsymm8, Abs3dTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(Abs2dQuantisedSymm16, Abs2dTest<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(Abs3dQuantisedSymm16, Abs3dTest<DataType::QuantisedSymm16>)
+
 // Detection PostProcess
 BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsFloat)
 {
diff --git a/src/backends/reference/workloads/Abs.cpp b/src/backends/reference/workloads/Abs.cpp
new file mode 100644
index 0000000..6a6a79c
--- /dev/null
+++ b/src/backends/reference/workloads/Abs.cpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Abs.hpp"
+
+namespace armnn
+{
+
+void Abs(Decoder<float>& in,
+         Encoder<float>& out,
+         const TensorInfo& tensorInfo)
+{
+    for (unsigned int i = 0u; i < tensorInfo.GetNumElements(); ++i)
+    {
+        out[i]; // operator[] positions the encoder at element i (not a no-op)
+        in[i];  // operator[] positions the decoder at element i
+        out.Set(std::abs(in.Get()));
+    }
+}
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/Abs.hpp b/src/backends/reference/workloads/Abs.hpp
new file mode 100644
index 0000000..b1165d2
--- /dev/null
+++ b/src/backends/reference/workloads/Abs.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "BaseIterator.hpp"
+#include <armnn/Tensor.hpp>
+#include <armnn/Types.hpp>
+
+namespace armnn
+{
+
+/// Performs the absolute function elementwise
+/// on the inputs to give the outputs.
+void Abs(Decoder<float>& in,
+         Encoder<float>& out,
+         const TensorInfo& tensorInfo);
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index c9db057..7f49e80 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -4,6 +4,8 @@
 #
 
 list(APPEND armnnRefBackendWorkloads_sources
+    Abs.cpp
+    Abs.hpp
     Activation.cpp
     Activation.hpp
     BaseIterator.hpp
@@ -41,6 +43,8 @@
     Pooling2d.hpp
     PreluImpl.cpp
     PreluImpl.hpp
+    RefAbsWorkload.cpp
+    RefAbsWorkload.hpp
     RefActivationWorkload.cpp
     RefActivationWorkload.hpp
     RefBatchNormalizationWorkload.cpp
diff --git a/src/backends/reference/workloads/RefAbsWorkload.cpp b/src/backends/reference/workloads/RefAbsWorkload.cpp
new file mode 100644
index 0000000..5c1f8c0
--- /dev/null
+++ b/src/backends/reference/workloads/RefAbsWorkload.cpp
@@ -0,0 +1,37 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefAbsWorkload.hpp"
+
+#include "Abs.hpp"
+#include "Decoders.hpp"
+#include "Encoders.hpp"
+#include "RefWorkloadUtils.hpp"
+
+#include <Profiling.hpp>
+
+namespace armnn
+{
+
+void RefAbsWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefAbsWorkload_Execute");
+
+    const TensorInfo& inputTensorInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+
+    std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputTensorInfo, m_Data.m_Inputs[0]->Map());
+    Decoder<float>& decoder = *decoderPtr;
+
+    const TensorInfo& outputTensorInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+    std::unique_ptr<Encoder<float>> encoderPtr = MakeEncoder<float>(outputTensorInfo, m_Data.m_Outputs[0]->Map());
+    Encoder<float>& encoder = *encoderPtr;
+
+    Abs(decoder,
+        encoder,
+        inputTensorInfo);
+}
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefAbsWorkload.hpp b/src/backends/reference/workloads/RefAbsWorkload.hpp
new file mode 100644
index 0000000..6810555
--- /dev/null
+++ b/src/backends/reference/workloads/RefAbsWorkload.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+class RefAbsWorkload : public BaseWorkload<AbsQueueDescriptor>
+{
+public:
+    using BaseWorkload<AbsQueueDescriptor>::BaseWorkload;
+    virtual void Execute() const override;
+};
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index e86dccd..1ec349e 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -5,6 +5,7 @@
 
 #pragma once
 
+#include "Abs.hpp"
 #include "Activation.hpp"
 #include "BatchNormImpl.hpp"
 #include "ConvImpl.hpp"
@@ -13,6 +14,7 @@
 #include "FullyConnected.hpp"
 #include "Gather.hpp"
 #include "Pooling2d.hpp"
+#include "RefAbsWorkload.hpp"
 #include "RefActivationWorkload.hpp"
 #include "RefBatchNormalizationWorkload.hpp"
 #include "RefBatchToSpaceNdWorkload.hpp"