IVGCVSW-3975 Add reference workload for LOG_SOFTMAX

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I10bb7133e0e2d6d7199abdf39562b1226bbbd3e7
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 3da2259..754a3a0 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -57,6 +57,7 @@
     test/layerTests/GreaterTestImpl.cpp \
     test/layerTests/InstanceNormalizationTestImpl.cpp \
     test/layerTests/L2NormalizationTestImpl.cpp \
+    test/layerTests/LogSoftmaxTestImpl.cpp \
     test/layerTests/LstmTestImpl.cpp \
     test/layerTests/MaximumTestImpl.cpp \
     test/layerTests/MinimumTestImpl.cpp \
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index f7d58bf..d353a77 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -91,6 +91,8 @@
     layerTests/L2NormalizationTestImpl.cpp
     layerTests/L2NormalizationTestImpl.hpp
     layerTests/LayerTestResult.hpp
+    layerTests/LogSoftmaxTestImpl.cpp
+    layerTests/LogSoftmaxTestImpl.hpp
     layerTests/LstmTestImpl.cpp
     layerTests/LstmTestImpl.hpp
     layerTests/MaximumTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 239d0d5..eb41314 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -29,6 +29,7 @@
 #include <backendsCommon/test/layerTests/GreaterTestImpl.hpp>
 #include <backendsCommon/test/layerTests/InstanceNormalizationTestImpl.hpp>
 #include <backendsCommon/test/layerTests/L2NormalizationTestImpl.hpp>
+#include <backendsCommon/test/layerTests/LogSoftmaxTestImpl.hpp>
 #include <backendsCommon/test/layerTests/LstmTestImpl.hpp>
 #include <backendsCommon/test/layerTests/MaximumTestImpl.hpp>
 #include <backendsCommon/test/layerTests/MeanTestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
new file mode 100644
index 0000000..0b73d37
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
@@ -0,0 +1,251 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "LogSoftmaxTestImpl.hpp"
+
+#include <Half.hpp>
+#include <ResolveType.hpp>
+
+#include <armnn/ArmNN.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <backendsCommon/test/QuantizeHelper.hpp>
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+namespace
+{
+
+template<armnn::DataType ArmnnType,
+         std::size_t NumDims,
+         typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, NumDims> LogSoftmaxTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::TensorInfo& inputInfo,
+    const armnn::TensorInfo& outputInfo,
+    const std::vector<float>& inputValues,
+    const std::vector<float>& expectedOutputValues,
+    armnn::LogSoftmaxQueueDescriptor descriptor,
+    float qScale = 1.0f,
+    int32_t qOffset = 0)
+{
+    LayerTestResult<T, NumDims> result(outputInfo);
+    result.outputExpected =
+        MakeTensor<T, NumDims>(outputInfo, QuantizedVector<T>(qScale, qOffset, expectedOutputValues));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
+
+    armnn::WorkloadInfo info;
+
+    AddInputToWorkload(descriptor, info, inputInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLogSoftmax(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    auto inputTensor = MakeTensor<T, NumDims>(inputInfo, QuantizedVector<T>(qScale, qOffset, inputValues));
+    CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin());
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
+
+    return result;
+}
+
+} // anonymous namespace
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> LogSoftmaxTest1(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
+
+    armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
+    armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
+
+    std::vector<float> inputValues
+    {
+        0.f, -6.f,  2.f, 4.f,
+        3.f, -2.f, 10.f, 1.f
+    };
+
+    std::vector<float> expectedOutputValues
+    {
+        -4.14297f, -10.14297f, -2.14297f, -0.14297f,
+        -7.00104f, -12.00104f, -0.00105f, -9.00104f
+    };
+
+    armnn::LogSoftmaxQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_Beta = 1.0f; // default beta
+    descriptor.m_Parameters.m_Axis = -1;   // default axis
+
+    return LogSoftmaxTestImpl<ArmnnType, 4>(
+        workloadFactory,
+        memoryManager,
+        inputTensorInfo,
+        outputTensorInfo,
+        inputValues,
+        expectedOutputValues,
+        descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> LogSoftmaxTest2(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
+
+    armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
+    armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
+
+    std::vector<float> inputValues
+    {
+        0.f, -6.f,  2.f, 4.f,
+        3.f, -2.f, 10.f, 1.f
+    };
+
+    std::vector<float> expectedOutputValues
+    {
+        -4.14297f, -10.14297f, -2.14297f, -0.14297f,
+        -7.00104f, -12.00104f, -0.00105f, -9.00104f
+    };
+
+    armnn::LogSoftmaxQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_Beta = 1.0f; // default beta
+    descriptor.m_Parameters.m_Axis = 3;    // positive axis
+
+    return LogSoftmaxTestImpl<ArmnnType, 4>(
+        workloadFactory,
+        memoryManager,
+        inputTensorInfo,
+        outputTensorInfo,
+        inputValues,
+        expectedOutputValues,
+        descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> LogSoftmaxTest3(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
+
+    armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
+    armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
+
+    std::vector<float> inputValues
+    {
+        0.0f, -0.6f, 0.2f, 0.4f,
+        0.3f, -0.2f, 1.0f, 0.1f
+    };
+
+    std::vector<float> expectedOutputValues
+    {
+        -4.14297f, -10.14297f, -2.14297f, -0.14297f,
+        -7.00104f, -12.00104f, -0.00105f, -9.00104f
+    };
+
+    armnn::LogSoftmaxQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_Beta = 10.0f; // non-default beta
+    descriptor.m_Parameters.m_Axis = 3;     // positive axis
+
+    return LogSoftmaxTestImpl<ArmnnType, 4>(
+        workloadFactory,
+        memoryManager,
+        inputTensorInfo,
+        outputTensorInfo,
+        inputValues,
+        expectedOutputValues,
+        descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> LogSoftmaxTest4(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
+
+    armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
+    armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
+
+    std::vector<float> inputValues
+    {
+        0.f, -6.f,  2.f, 4.f,
+        3.f, -2.f, 10.f, 1.f
+    };
+
+    std::vector<float> expectedOutputValues
+    {
+        -3.048587f, -4.018149f, -8.000336f, -0.048587f,
+        -0.048587f, -0.018149f, -0.000335f, -3.048587f
+    };
+
+    armnn::LogSoftmaxQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_Beta = 1.0f; // default beta
+    descriptor.m_Parameters.m_Axis = -2;   // negative axis
+
+    return LogSoftmaxTestImpl<ArmnnType, 4>(
+        workloadFactory,
+        memoryManager,
+        inputTensorInfo,
+        outputTensorInfo,
+        inputValues,
+        expectedOutputValues,
+        descriptor);
+}
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+LogSoftmaxTest1<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+LogSoftmaxTest2<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+LogSoftmaxTest3<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+LogSoftmaxTest4<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
+LogSoftmaxTest1<armnn::DataType::Float16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
+LogSoftmaxTest2<armnn::DataType::Float16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
+LogSoftmaxTest3<armnn::DataType::Float16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
+LogSoftmaxTest4<armnn::DataType::Float16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.hpp
new file mode 100644
index 0000000..18a14cc
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.hpp
@@ -0,0 +1,33 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <ResolveType.hpp>
+
+#include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> LogSoftmaxTest1(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> LogSoftmaxTest2(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> LogSoftmaxTest3(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> LogSoftmaxTest4(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 0d6b16c..9342b29 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -897,6 +897,32 @@
     return supported;
 }
 
+bool RefLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
+                                            const TensorInfo& output,
+                                            const LogSoftmaxDescriptor& descriptor,
+                                            Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(descriptor);
+
+    std::array<DataType, 2> supportedTypes =
+    {
+            DataType::Float32,
+            DataType::Float16
+    };
+
+    bool supported = true;
+    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+                                  "Reference LogSoftmax: input type not supported");
+
+    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+                                  "Reference LogSoftmax: output type not supported");
+
+    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+                                  "Reference LogSoftmax: input and output types do not match");
+
+    return supported;
+}
+
 bool RefLayerSupport::IsLstmSupported(const TensorInfo& input,
                                       const TensorInfo& outputStateIn,
                                       const TensorInfo& cellStateIn,
@@ -1499,13 +1525,13 @@
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
-                                  "Reference concatenation: output type not supported");
+                                  "Reference Softmax: input type not supported");
 
     supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
-                                  "Reference concatenation: input type not supported");
+                                  "Reference Softmax: output type not supported");
 
     supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
-                                  "Reference concatenation: input type not supported");
+                                  "Reference Softmax: input and output types do not match");
 
     return supported;
 }
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 36080f7..5c71e8d 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -149,6 +149,11 @@
                                     const L2NormalizationDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsLogSoftmaxSupported(const TensorInfo& input,
+                               const TensorInfo& output,
+                               const LogSoftmaxDescriptor& descriptor,
+                               Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsLstmSupported(const TensorInfo& input,
                          const TensorInfo& outputStateIn,
                          const TensorInfo& cellStateIn,
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 8c08274..1f6d1d7 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -23,10 +23,9 @@
 }
 template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
 std::unique_ptr<IWorkload> RefWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
-    const WorkloadInfo& info) const
+                                                            const WorkloadInfo& info) const
 {
-    return armnn::MakeWorkloadHelper<NullWorkload, F32Workload, U8Workload, NullWorkload, NullWorkload>(descriptor,
-                                                                                                        info);
+    return MakeWorkloadHelper<NullWorkload, F32Workload, U8Workload, NullWorkload, NullWorkload>(descriptor, info);
 }
 
 template <DataType ArmnnType>
@@ -95,6 +94,155 @@
     return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
 }
 
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
+                                                         const WorkloadInfo& info) const
+{
+    return std::make_unique<RefAbsWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
+                                                                const WorkloadInfo& info) const
+{
+    return std::make_unique<RefActivationWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
+                                                              const WorkloadInfo& info) const
+{
+    return std::make_unique<RefAdditionWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
+                                                               const WorkloadInfo& info) const
+{
+    return std::make_unique<RefArgMinMaxWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateBatchNormalization(
+    const BatchNormalizationQueueDescriptor& descriptor,
+    const WorkloadInfo& info) const
+{
+    return std::make_unique<RefBatchNormalizationWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
+                                                                    const WorkloadInfo& info) const
+{
+    return std::make_unique<RefBatchToSpaceNdWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
+                                                            const WorkloadInfo& info) const
+{
+    return std::make_unique<RefConcatWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
+                                                              const WorkloadInfo& info) const
+{
+    return std::make_unique<RefConstantWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertFp16ToFp32(
+    const ConvertFp16ToFp32QueueDescriptor& descriptor,
+    const WorkloadInfo& info) const
+{
+    return std::make_unique<RefConvertFp16ToFp32Workload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertFp32ToFp16(
+    const ConvertFp32ToFp16QueueDescriptor& descriptor,
+    const WorkloadInfo& info) const
+{
+    return std::make_unique<RefConvertFp32ToFp16Workload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
+                                                                   const WorkloadInfo& info) const
+{
+    return std::make_unique<RefConvolution2dWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
+                                                           const WorkloadInfo& info) const
+{
+    if (IsQSymm16(info))
+    {
+        return std::make_unique<RefDebugQSymm16Workload>(descriptor, info);
+    }
+    return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymm8Workload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
+                                                                  const WorkloadInfo& info) const
+{
+    return std::make_unique<RefDepthToSpaceWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDepthwiseConvolution2d(
+    const DepthwiseConvolution2dQueueDescriptor& descriptor,
+    const WorkloadInfo& info) const
+{
+    return std::make_unique<RefDepthwiseConvolution2dWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDequantize(const DequantizeQueueDescriptor& descriptor,
+                                                                const WorkloadInfo& info) const
+{
+    return std::make_unique<RefDequantizeWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDetectionPostProcess(
+    const DetectionPostProcessQueueDescriptor& descriptor,
+    const WorkloadInfo& info) const
+{
+    return std::make_unique<RefDetectionPostProcessWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& descriptor,
+                                                              const WorkloadInfo& info) const
+{
+    return std::make_unique<RefDivisionWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
+                                                           const WorkloadInfo& info) const
+{
+    return std::make_unique<RefEqualWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFakeQuantization(
+    const FakeQuantizationQueueDescriptor& descriptor,
+    const WorkloadInfo& info) const
+{
+    return MakeWorkload<RefFakeQuantizationFloat32Workload, NullWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
+                                                           const WorkloadInfo& info) const
+{
+    return std::make_unique<RefFloorWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFullyConnected(
+    const FullyConnectedQueueDescriptor& descriptor,
+    const WorkloadInfo& info) const
+{
+    return std::make_unique<RefFullyConnectedWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGather(const GatherQueueDescriptor& descriptor,
+                                                            const WorkloadInfo& info) const
+{
+    return std::make_unique<RefGatherWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
+                                                             const WorkloadInfo& info) const
+{
+    return std::make_unique<RefGreaterWorkload>(descriptor, info);
+}
+
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
 {
@@ -115,6 +263,87 @@
     return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateInstanceNormalization(
+    const InstanceNormalizationQueueDescriptor& descriptor,
+    const WorkloadInfo& info) const
+{
+    return std::make_unique<RefInstanceNormalizationWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
+                                                                     const WorkloadInfo& info) const
+{
+    return std::make_unique<RefL2NormalizationWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
+                                                                const WorkloadInfo& info) const
+{
+    return std::make_unique<RefLogSoftmaxWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
+                                                          const WorkloadInfo& info) const
+{
+    return std::make_unique<RefLstmWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& descriptor,
+                                                             const WorkloadInfo& info) const
+{
+    return std::make_unique<RefMaximumWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
+                                                          const WorkloadInfo& info) const
+{
+    return std::make_unique<RefMeanWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
+                                                             const WorkloadInfo& info) const
+{
+    if (descriptor.m_Inputs.empty())
+    {
+        throw InvalidArgumentException("RefWorkloadFactory: CreateMemCopy() expected an input tensor.");
+    }
+    return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& descriptor,
+                                                               const WorkloadInfo& info) const
+{
+    if (descriptor.m_Inputs.empty())
+    {
+        throw InvalidArgumentException("RefWorkloadFactory: CreateMemImport() expected an input tensor.");
+    }
+    return std::make_unique<ImportMemGenericWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor,
+                                                            const WorkloadInfo& info) const
+{
+    return CreateConcat(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
+                                                             const WorkloadInfo& info) const
+{
+    return std::make_unique<RefMinimumWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
+                                                                    const WorkloadInfo& info) const
+{
+    return std::make_unique<RefMultiplicationWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor,
+                                                                   const WorkloadInfo& info) const
+{
+    return std::make_unique<RefNormalizationWorkload>(descriptor, info);
+}
+
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
 {
@@ -134,44 +363,22 @@
     return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
 }
 
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
-                                                                const WorkloadInfo&              info) const
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
+                                                         const WorkloadInfo& info) const
 {
-    return std::make_unique<RefActivationWorkload>(descriptor, info);
+    if (IsQSymm16(info))
+    {
+        return std::make_unique<RefPadQSymm16Workload>(descriptor, info);
+    }
+    else if (IsFloat16(info))
+    {
+        return std::make_unique<RefPadFloat16Workload>(descriptor, info);
+    }
+    return MakeWorkload<RefPadFloat32Workload, RefPadQAsymm8Workload>(descriptor, info);
 }
 
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
-                                                               const WorkloadInfo&              info) const
-{
-    return std::make_unique<RefArgMinMaxWorkload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
-                                                             const WorkloadInfo&           info) const
-{
-    return std::make_unique<RefSoftmaxWorkload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
-                                                              const WorkloadInfo&            info) const
-{
-    return std::make_unique<RefSplitterWorkload>(descriptor, info);
-}
-
-std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor,
-                                                                   const WorkloadInfo&          info) const
-{
-    return CreateConcat(descriptor, info);
-}
-
-std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateFullyConnected(
-    const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info) const
-{
-    return std::make_unique<RefFullyConnectedWorkload>(descriptor, info);
-}
-
-std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
-                                                                    const WorkloadInfo&           info) const
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
+                                                             const WorkloadInfo& info) const
 {
     if (IsQSymm16(info))
     {
@@ -181,78 +388,34 @@
         NullWorkload, NullWorkload>(descriptor, info);
 }
 
-std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
-                                                                      const WorkloadInfo&           info) const
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
+                                                               const WorkloadInfo& info) const
 {
     return std::make_unique<RefPooling2dWorkload>(descriptor, info);
 }
 
-std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateConvolution2d(
-    const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
+                                                                 const WorkloadInfo& info) const
 {
-    return std::make_unique<RefConvolution2dWorkload>(descriptor, info);
+    return nullptr;
 }
 
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
-                                                                  const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePrelu(const PreluQueueDescriptor& descriptor,
+                                                           const WorkloadInfo& info) const
 {
-    return std::make_unique<RefDepthToSpaceWorkload>(descriptor, info);
+    return std::make_unique<RefPreluWorkload>(descriptor, info);
 }
 
-std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateDepthwiseConvolution2d(
-    const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor,
+                                                              const WorkloadInfo& info) const
 {
-    return std::make_unique<RefDepthwiseConvolution2dWorkload>(descriptor, info);
+    return std::make_unique<RefQuantizeWorkload>(descriptor, info);
 }
 
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDetectionPostProcess(
-    const armnn::DetectionPostProcessQueueDescriptor& descriptor, const armnn::WorkloadInfo& info) const
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
+                                                             const WorkloadInfo& info) const
 {
-    return std::make_unique<RefDetectionPostProcessWorkload>(descriptor, info);
-}
-
-std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateNormalization(
-    const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
-{
-    return std::make_unique<RefNormalizationWorkload>(descriptor, info);
-}
-
-std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
-                                                                     const WorkloadInfo&            info) const
-{
-    return std::make_unique<RefAdditionWorkload>(descriptor, info);
-}
-
-std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMultiplication(
-    const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info) const
-{
-    return std::make_unique<RefMultiplicationWorkload>(descriptor, info);
-}
-
-std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateBatchNormalization(
-    const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
-{
-    return std::make_unique<RefBatchNormalizationWorkload>(descriptor, info);
-}
-
-std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
-                                                                    const WorkloadInfo&        info) const
-{
-    if (descriptor.m_Inputs.empty())
-    {
-        throw InvalidArgumentException("RefWorkloadFactory: CreateMemCopy() expected an input tensor.");
-    }
-    return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
-}
-
-std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& descriptor,
-                                                                      const WorkloadInfo&        info) const
-{
-    if (descriptor.m_Inputs.empty())
-    {
-        throw InvalidArgumentException("RefWorkloadFactory: CreateMemImport() expected an input tensor.");
-    }
-    return std::make_unique<ImportMemGenericWorkload>(descriptor, info);
+    return std::make_unique<RefReshapeWorkload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
@@ -273,218 +436,65 @@
     return CreateResize(resizeDescriptor, info);
 }
 
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFakeQuantization(
-    const FakeQuantizationQueueDescriptor& descriptor,
-    const WorkloadInfo& info) const
-{
-    return MakeWorkload<RefFakeQuantizationFloat32Workload, NullWorkload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
-    const WorkloadInfo& info) const
-{
-    return std::make_unique<RefL2NormalizationWorkload>(descriptor, info);
-}
-
-std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
-                                                                   const WorkloadInfo&          info) const
-{
-    return std::make_unique<RefConcatWorkload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
-    const WorkloadInfo& info) const
-{
-    return std::make_unique<RefConstantWorkload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
-    const WorkloadInfo& info) const
-{
-    return std::make_unique<RefReshapeWorkload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
-    const WorkloadInfo& info) const
-{
-    return std::make_unique<RefSpaceToBatchNdWorkload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSpaceToDepth(const armnn::SpaceToDepthQueueDescriptor& descriptor,
-    const armnn::WorkloadInfo& info) const
-{
-    return std::make_unique<RefSpaceToDepthWorkload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
-                                                          const WorkloadInfo& info) const
-{
-    return std::make_unique<RefFloorWorkload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
-    const WorkloadInfo& info) const
-{
-    return std::make_unique<RefLstmWorkload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertFp16ToFp32(
-    const ConvertFp16ToFp32QueueDescriptor& descriptor,
-    const WorkloadInfo& info) const
-{
-    return std::make_unique<RefConvertFp16ToFp32Workload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertFp32ToFp16(
-    const ConvertFp32ToFp16QueueDescriptor& descriptor,
-    const WorkloadInfo& info) const
-{
-    return std::make_unique<RefConvertFp32ToFp16Workload>(descriptor, info);
-}
-
-std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateDivision(
-    const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const
-{
-    return std::make_unique<RefDivisionWorkload>(descriptor, info);
-}
-
-std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateSubtraction(
-    const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info) const
-{
-    return std::make_unique<RefSubtractionWorkload>(descriptor, info);
-}
-
-std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMaximum(
-    const MaximumQueueDescriptor& descriptor, const WorkloadInfo& info) const
-{
-    return std::make_unique<RefMaximumWorkload>(descriptor, info);
-}
-
-std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMean(
-    const MeanQueueDescriptor& descriptor, const WorkloadInfo& info) const
-{
-    return  std::make_unique<RefMeanWorkload>(descriptor, info);
-}
-
-std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMinimum(
-    const MinimumQueueDescriptor& descriptor, const WorkloadInfo& info) const
-{
-    return std::make_unique<RefMinimumWorkload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
-                                                 const WorkloadInfo& info) const
-{
-    if (IsQSymm16(info))
-    {
-        return std::make_unique<RefPadQSymm16Workload>(descriptor, info);
-    }
-    else if (IsFloat16(info))
-    {
-        return std::make_unique<RefPadFloat16Workload>(descriptor, info);
-    }
-    return MakeWorkload<RefPadFloat32Workload, RefPadQAsymm8Workload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
-                                                           const WorkloadInfo& info) const
-{
-    return std::make_unique<RefEqualWorkload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
-                                                                    const WorkloadInfo& info) const
-{
-    return std::make_unique<RefBatchToSpaceNdWorkload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
-                                                                  const WorkloadInfo& info) const
-{
-    return std::make_unique<RefStridedSliceWorkload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
-                                                             const WorkloadInfo& info) const
-{
-    return std::make_unique<RefGreaterWorkload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
-                                                           const WorkloadInfo& info) const
-{
-    if (IsQSymm16(info))
-    {
-        return std::make_unique<RefDebugQSymm16Workload>(descriptor, info);
-    }
-    return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymm8Workload>(descriptor, info);
-}
-
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
 {
     return std::make_unique<RefRsqrtWorkload>(descriptor, info);
 }
 
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGather(const armnn::GatherQueueDescriptor& descriptor,
-                                                            const armnn::WorkloadInfo& info) const
-{
-    return std::make_unique<RefGatherWorkload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
-                                                                 const WorkloadInfo& info) const
-{
-    return nullptr;
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor,
-                                                              const WorkloadInfo& info) const
-{
-    return std::make_unique<RefQuantizeWorkload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDequantize(const DequantizeQueueDescriptor& descriptor,
-                                                                const WorkloadInfo& info) const
-{
-    return std::make_unique<RefDequantizeWorkload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePrelu(const PreluQueueDescriptor& descriptor,
-                                                           const WorkloadInfo& info) const
-{
-    return std::make_unique<RefPreluWorkload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateTransposeConvolution2d(
-    const TransposeConvolution2dQueueDescriptor& descriptor,
-    const WorkloadInfo& info) const
-{
-    return std::make_unique<RefTransposeConvolution2dWorkload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
-                                                           const WorkloadInfo& info) const
-{
-    return std::make_unique<RefStackWorkload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
-                                                         const WorkloadInfo& info) const
-{
-    return std::make_unique<RefAbsWorkload>(descriptor, info);
-}
-
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
 {
     return std::make_unique<RefSliceWorkload>(descriptor, info);
 }
 
-std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateInstanceNormalization(
-    const InstanceNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
+                                                             const WorkloadInfo& info) const
 {
-    return std::make_unique<RefInstanceNormalizationWorkload>(descriptor, info);
+    return std::make_unique<RefSoftmaxWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
+                                                                    const WorkloadInfo& info) const
+{
+    return std::make_unique<RefSpaceToBatchNdWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
+                                                                  const WorkloadInfo& info) const
+{
+    return std::make_unique<RefSpaceToDepthWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
+                                                              const WorkloadInfo& info) const
+{
+    return std::make_unique<RefSplitterWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
+                                                           const WorkloadInfo& info) const
+{
+    return std::make_unique<RefStackWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
+                                                                  const WorkloadInfo& info) const
+{
+    return std::make_unique<RefStridedSliceWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
+                                                                 const WorkloadInfo& info) const
+{
+    return std::make_unique<RefSubtractionWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateTransposeConvolution2d(
+    const TransposeConvolution2dQueueDescriptor& descriptor,
+    const WorkloadInfo& info) const
+{
+    return std::make_unique<RefTransposeConvolution2dWorkload>(descriptor, info);
 }
 
 } // namespace armnn
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 0a1fab1..41e9b28 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -60,174 +60,177 @@
                                                       DataLayout dataLayout,
                                                       const bool IsMemoryManaged = true) const override;
 
-    std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor,
-                                            const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
+                                         const WorkloadInfo& info) const override;
 
     std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
 
-    std::unique_ptr<IWorkload> CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
-                                               const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
-                                             const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor,
-                                              const WorkloadInfo& info) const override;
-
-    ARMNN_DEPRECATED_MSG("Use CreateConcat instead")
-    std::unique_ptr<IWorkload> CreateMerger(const MergerQueueDescriptor& descriptor,
-                                            const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
-                                                    const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
-                                               const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& descriptor,
-                                             const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
-                                                   const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
-                                                  const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor& descriptor,
-                                                            const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor& descriptor,
-                                                          const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor,
-                                                   const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
-                                                    const WorkloadInfo& info) const override;
-
     std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
+                                               const WorkloadInfo& info) const override;
+
     std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const override;
 
-    std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
-                                             const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& descriptor,
-                                                const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
-                                            const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
+    std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
 
-    std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
-                                                      const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
-                                                     const WorkloadInfo& info) const override;
-
     std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
     std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
+                                                       const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
+                                                       const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
+                                                   const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
+                                           const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
+                                                  const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor& descriptor,
+                                                            const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateDequantize(const DequantizeQueueDescriptor& descriptor,
+                                                const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor& descriptor,
+                                                          const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
+                                              const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor,
+                                           const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
+                                                      const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor,
+                                           const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
+                                                    const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
+                                            const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
+                                             const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor,
+                                           const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateInstanceNormalization(const InstanceNormalizationQueueDescriptor& descriptor,
+                                                           const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
+                                                     const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
+                                                const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateLstm(const LstmQueueDescriptor& descriptor,
+                                          const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateMaximum(const MaximumQueueDescriptor& descriptor,
+                                             const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateMean(const MeanQueueDescriptor& descriptor,
+                                          const WorkloadInfo& Info) const override;
+
+    std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
+                                             const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& descriptor,
+                                                const WorkloadInfo& info) const override;
+
+    ARMNN_DEPRECATED_MSG("Use CreateConcat instead")
+    std::unique_ptr<IWorkload> CreateMerger(const MergerQueueDescriptor& descriptor,
+                                            const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
+                                             const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
+                                                    const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor,
+                                                   const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor,
+                                            const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
+                                         const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& descriptor,
+                                             const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
+                                               const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
+                                                 const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreatePrelu(const PreluQueueDescriptor& descriptor,
+                                           const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor,
+                                              const WorkloadInfo& info) const override;
+
     std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
+                                            const WorkloadInfo& info) const override;
+
+    ARMNN_DEPRECATED_MSG("Use CreateResize instead")
+    std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
+                                                    const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
+                                           const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor,
+                                           const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
+                                             const WorkloadInfo& info) const override;
+
     std::unique_ptr<IWorkload> CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
 
     std::unique_ptr<IWorkload> CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info) const override;
 
-    std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateLstm(const LstmQueueDescriptor& descriptor,
-                                          const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
-                                                       const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
-                                                       const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
+    std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
 
-    std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
-                                                 const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateMaximum(const MaximumQueueDescriptor& descriptor,
-                                             const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateMean(const MeanQueueDescriptor& descriptor,
-                                          const WorkloadInfo& Info) const override;
-
-    std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
-                                         const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
-                                                    const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
-                                                  const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
-                                             const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
-                                             const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
-                                                 const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
-                                            const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateDequantize(const DequantizeQueueDescriptor& descriptor,
-                                                const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor,
-                                              const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreatePrelu(const PreluQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& descriptor,
-                                                            const WorkloadInfo& info) const override;
-
     std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
-    std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
-                                         const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
+                                                  const WorkloadInfo& info) const override;
 
-    std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
+                                                 const WorkloadInfo& info) const override;
 
-    std::unique_ptr<IWorkload> CreateInstanceNormalization(const InstanceNormalizationQueueDescriptor& descriptor,
-                                                           const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& descriptor,
+                                                            const WorkloadInfo& info) const override;
 
 private:
-
     template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
     std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor, const WorkloadInfo& info) const;
 
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index f45b015..49b07a4 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -35,6 +35,7 @@
         workloads/FullyConnected.cpp \
         workloads/Gather.cpp \
         workloads/InstanceNorm.cpp \
+        workloads/LogSoftmax.cpp \
         workloads/LstmUtils.cpp \
         workloads/Mean.cpp \
         workloads/Concatenate.cpp \
@@ -63,6 +64,7 @@
         workloads/RefGatherWorkload.cpp \
         workloads/RefInstanceNormalizationWorkload.cpp \
         workloads/RefL2NormalizationWorkload.cpp \
+        workloads/RefLogSoftmaxWorkload.cpp \
         workloads/RefLstmWorkload.cpp \
         workloads/RefMeanWorkload.cpp \
         workloads/RefNormalizationWorkload.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index cef3a80..5de9b75 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -868,6 +868,17 @@
 ARMNN_AUTO_TEST_CASE(L2NormalizationDefaultEpsilon, L2NormalizationDefaultEpsilonTest, DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(L2NormalizationNonDefaultEpsilon, L2NormalizationNonDefaultEpsilonTest, DataLayout::NCHW)
 
+// LogSoftmax
+ARMNN_AUTO_TEST_CASE(LogSoftmaxFloat32_1, LogSoftmaxTest1<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(LogSoftmaxFloat32_2, LogSoftmaxTest2<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(LogSoftmaxFloat32_3, LogSoftmaxTest3<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(LogSoftmaxFloat32_4, LogSoftmaxTest4<DataType::Float32>)
+
+ARMNN_AUTO_TEST_CASE(LogSoftmaxFloat16_1, LogSoftmaxTest1<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(LogSoftmaxFloat16_2, LogSoftmaxTest2<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(LogSoftmaxFloat16_3, LogSoftmaxTest3<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(LogSoftmaxFloat16_4, LogSoftmaxTest4<DataType::Float16>)
+
 // Pad
 ARMNN_AUTO_TEST_CASE(PadFloat322d, PadFloat322dTest)
 ARMNN_AUTO_TEST_CASE(PadFloat322dCustomPadding, PadFloat322dCustomPaddingTest)
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 9a5f427..b8eb95c 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -37,6 +37,8 @@
     Gather.hpp
     InstanceNorm.cpp
     InstanceNorm.hpp
+    LogSoftmax.cpp
+    LogSoftmax.hpp
     LstmUtils.hpp
     LstmUtils.cpp
     Maximum.hpp
@@ -95,6 +97,8 @@
     RefInstanceNormalizationWorkload.hpp
     RefL2NormalizationWorkload.cpp
     RefL2NormalizationWorkload.hpp
+    RefLogSoftmaxWorkload.cpp
+    RefLogSoftmaxWorkload.hpp
     RefLstmWorkload.cpp
     RefLstmWorkload.hpp
     RefMeanWorkload.cpp
diff --git a/src/backends/reference/workloads/LogSoftmax.cpp b/src/backends/reference/workloads/LogSoftmax.cpp
new file mode 100644
index 0000000..3fa3dc0
--- /dev/null
+++ b/src/backends/reference/workloads/LogSoftmax.cpp
@@ -0,0 +1,91 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "LogSoftmax.hpp"
+
+#include <TensorUtils.hpp>
+
+#include <cmath>
+
+#include <boost/assert.hpp>
+#include <boost/core/ignore_unused.hpp>
+#include <boost/numeric/conversion/cast.hpp>
+
+namespace
+{
+
+inline bool ValidateAxis(int axis, unsigned int numDimensions) // True iff axis is in [-numDimensions, numDimensions).
+{
+    const int sNumDimensions = boost::numeric_cast<int>(numDimensions); // signed copy so the comparison with a negative axis is well-defined
+    return axis < sNumDimensions && axis >= -sNumDimensions;
+}
+
+} // anonymous namespace
+
+namespace armnn
+{
+
+void LogSoftmax(Decoder<float>& input, // Computes log-softmax of 'input' along descriptor.m_Axis (scaled by m_Beta) into 'output'.
+                Encoder<float>& output,
+                const TensorInfo& inputInfo,
+                const LogSoftmaxDescriptor& descriptor)
+{
+    const unsigned int numDimensions = inputInfo.GetNumDimensions();
+
+    bool axisIsValid = ValidateAxis(descriptor.m_Axis, numDimensions);
+    BOOST_ASSERT_MSG(axisIsValid,
+        "Axis index is not in range [-numDimensions, numDimensions).");
+    boost::ignore_unused(axisIsValid); // silence unused warning in release builds where the assert compiles out
+
+    unsigned int uAxis = descriptor.m_Axis < 0  ? // map a negative axis to its equivalent non-negative index
+        numDimensions - boost::numeric_cast<unsigned int>(std::abs(descriptor.m_Axis)) :
+        boost::numeric_cast<unsigned int>(descriptor.m_Axis);
+
+    const TensorShape& inputShape = inputInfo.GetShape();
+    const unsigned int outerSize  = armnnUtils::GetNumElementsBetween(inputShape, 0, uAxis); // product of dims before the axis
+    const unsigned int axisSize   = inputShape[uAxis];
+    const unsigned int innerSize  = armnnUtils::GetNumElementsBetween(inputShape, // product of dims after the axis
+                                                                      uAxis + 1,
+                                                                      inputShape.GetNumDimensions());
+
+    for (unsigned int outer = 0; outer < outerSize; ++outer)
+    {
+        for (unsigned int inner = 0; inner < innerSize; ++inner)
+        {
+            // Find the max along the axis first, for numerical stability of exp() below.
+            input[outer * axisSize * innerSize + inner]; // operator[] only repositions the decoder; Get() reads the value
+            float maxValue = input.Get();
+            for (unsigned int i = 1u; i < axisSize; ++i)
+            {
+                input[(outer * axisSize + i) * innerSize + inner];
+                maxValue = std::max(maxValue, input.Get());
+            }
+
+            // Accumulate sum of exp((x - max) * beta) along the axis.
+            float sum = 0.0f;
+            for (unsigned int i = 0u; i < axisSize; ++i)
+            {
+                input[(outer * axisSize + i) * innerSize + inner];
+                sum += std::exp((input.Get() - maxValue) * descriptor.m_Beta);
+            }
+
+            // Log of the accumulated sum, shared by every element on this axis slice.
+            const float logSum = std::log(sum);
+
+            // Write result: logsoftmax(x) = (x - max) * beta - log(sum).
+            for (unsigned int i = 0u; i < axisSize; ++i)
+            {
+                const unsigned int index = (outer * axisSize + i) * innerSize + inner;
+
+                input [index]; // reposition decoder and encoder to the same flat index
+                output[index];
+
+                output.Set((input.Get() - maxValue) * descriptor.m_Beta - logSum);
+            }
+        }
+    }
+}
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/LogSoftmax.hpp b/src/backends/reference/workloads/LogSoftmax.hpp
new file mode 100644
index 0000000..2e38399
--- /dev/null
+++ b/src/backends/reference/workloads/LogSoftmax.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "BaseIterator.hpp"
+
+#include <armnn/Tensor.hpp>
+
+namespace armnn
+{
+
+void LogSoftmax(Decoder<float>& input,
+                Encoder<float>& output,
+                const TensorInfo& inputInfo,
+                const LogSoftmaxDescriptor& descriptor);
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
new file mode 100644
index 0000000..a987e79
--- /dev/null
+++ b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
@@ -0,0 +1,36 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefLogSoftmaxWorkload.hpp"
+
+#include "Decoders.hpp"
+#include "Encoders.hpp"
+#include "LogSoftmax.hpp"
+#include "RefWorkloadUtils.hpp"
+
+#include <Profiling.hpp>
+
+#include <boost/assert.hpp>
+
+namespace armnn
+{
+
+void RefLogSoftmaxWorkload::Execute() const // Runs the reference LogSoftmax over the workload's single input/output pair.
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefLogSoftmaxWorkload_Execute");
+
+    const TensorInfo& inputInfo  = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+    std::unique_ptr<Decoder<float>> decoder = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map()); // decodes any supported input type to float
+    std::unique_ptr<Encoder<float>> encoder = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map()); // encodes float results back to the output type
+
+    BOOST_ASSERT(decoder != nullptr);
+    BOOST_ASSERT(encoder != nullptr);
+
+    LogSoftmax(*decoder, *encoder, inputInfo, m_Data.m_Parameters); // m_Parameters carries m_Axis and m_Beta
+}
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefLogSoftmaxWorkload.hpp b/src/backends/reference/workloads/RefLogSoftmaxWorkload.hpp
new file mode 100644
index 0000000..f5048d9
--- /dev/null
+++ b/src/backends/reference/workloads/RefLogSoftmaxWorkload.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+class RefLogSoftmaxWorkload : public BaseWorkload<LogSoftmaxQueueDescriptor> // Reference (CpuRef) workload for the LogSoftmax layer.
+{
+public:
+    using BaseWorkload<LogSoftmaxQueueDescriptor>::BaseWorkload; // inherit the base constructor; no extra state needed
+    virtual void Execute() const override;
+};
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 39dfa05..79d1935 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -38,6 +38,7 @@
 #include "RefGatherWorkload.hpp"
 #include "RefInstanceNormalizationWorkload.hpp"
 #include "RefL2NormalizationWorkload.hpp"
+#include "RefLogSoftmaxWorkload.hpp"
 #include "RefLstmWorkload.hpp"
 #include "RefMeanWorkload.hpp"
 #include "RefNormalizationWorkload.hpp"