IVGCVSW-3168 Refactor reference softmax workload into a single workload

Merge RefSoftmaxFloat32Workload and RefSoftmaxUint8Workload into one
RefSoftmaxWorkload that reads its input through Decoder<float> and
writes its output through Encoder<float>, so a single implementation
serves both Float32 and QuantisedAsymm8 tensors. Float16 still falls
through to NullWorkload in RefWorkloadFactory::CreateSoftmax.

Change-Id: Ie290efcbb9e3a6365cbd630cb2041e7b0f542505
Signed-off-by: nikraj01 <nikhil.raj@arm.com>
diff --git a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
index 3248954..4591def 100644
--- a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
@@ -244,6 +244,7 @@
     std::string result = GetSoftmaxProfilerJson(backends);
 
     std::string backend = "Ref";
+    std::string testName = "SoftmaxWorkload_Execute";
     std::string changeLine31 = "\n},\n\"CopyMemGeneric_Execute\": {";
     std::string changeLine39 = "us\"";
     std::string changeLine40;
@@ -253,6 +254,7 @@
     if (firstBackend == armnn::Compute::GpuAcc)
     {
         backend = "Cl";
+        testName = "SoftmaxUintWorkload_Execute";
         changeLine31 = ",\n\"OpenClKernelTimer/: softmax_layer_max_shift_exp_sum_quantized_serial GWS[,,]\": {";
         changeLine39 = R"(us"
 },
@@ -278,6 +280,7 @@
     else if (firstBackend == armnn::Compute::CpuAcc)
     {
         backend = "Neon";
+        testName = "SoftmaxUintWorkload_Execute";
         changeLine31 = ",\n\"NeonKernelTimer/: NEFillBorderKernel\": {";
         changeLine39 = R"(us"
 },
@@ -332,7 +335,7 @@
 ],
 "unit": "us"
 },
-")" + backend + R"(SoftmaxUintWorkload_Execute": {
+")" + backend + testName + R"(": {
 "raw": [
 ,
 ,
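
Note on the hunk above: after the merge, the Ref backend's profiler event becomes "RefSoftmaxWorkload_Execute" (see RefSoftmaxWorkload.cpp below), while the Cl and Neon backends keep "SoftmaxUintWorkload_Execute", so the expected JSON key is now assembled from a per-backend testName instead of a hard-coded suffix. A minimal illustration of the assembly, with the names taken from this hunk:

    #include <iostream>
    #include <string>

    int main()
    {
        std::string backend  = "Ref";
        std::string testName = "SoftmaxWorkload_Execute"; // GpuAcc/CpuAcc switch this
                                                          // to "SoftmaxUintWorkload_Execute"
        std::cout << backend + testName << '\n';          // prints "RefSoftmaxWorkload_Execute"
    }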
diff --git a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
index 94bef9b..795791f 100644
--- a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
+++ b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
@@ -78,7 +78,7 @@
     AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
 
     //Invalid argument exception is expected, because height != 1.
-    BOOST_CHECK_THROW(RefSoftmaxFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    BOOST_CHECK_THROW(RefSoftmaxWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 }
 
 BOOST_AUTO_TEST_CASE(FullyConnectedQueueDescriptor_Validate_RequiredDataMissing)
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index a21becd..50e3c00 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -123,7 +123,11 @@
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                                              const WorkloadInfo&           info) const
 {
-    return MakeWorkload<RefSoftmaxFloat32Workload, RefSoftmaxUint8Workload>(descriptor, info);
+    if (IsFloat16(info))
+    {
+        return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+    }
+    return std::make_unique<RefSoftmaxWorkload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
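
Previously CreateSoftmax delegated to MakeWorkload<RefSoftmaxFloat32Workload, RefSoftmaxUint8Workload>, which selects a workload class from the input tensor's data type; with a single merged workload only the Float16 guard (returning NullWorkload) remains. A framework-free sketch of that old dispatch, under assumed shapes (the real helper is the MakeWorkload template in backendsCommon, which also covers the Float16/NullWorkload case):

    #include <memory>

    enum class DataType { Float32, QuantisedAsymm8, Float16 };

    struct IWorkloadSketch
    {
        virtual ~IWorkloadSketch() = default;
        virtual void Execute() const = 0;
    };

    template <typename F32Workload, typename U8Workload, typename Descriptor, typename Info>
    std::unique_ptr<IWorkloadSketch> MakeWorkloadSketch(const Descriptor& descriptor,
                                                        const Info& info)
    {
        switch (info.GetInputDataType()) // stand-in for inspecting WorkloadInfo
        {
            case DataType::Float32:
                return std::make_unique<F32Workload>(descriptor, info);
            case DataType::QuantisedAsymm8:
                return std::make_unique<U8Workload>(descriptor, info);
            default:
                return nullptr; // hypothetical; the real helper returns a NullWorkload
        }
    }

After this change the per-type dispatch disappears for softmax: std::make_unique<RefSoftmaxWorkload> is called unconditionally for Float32 and QuantisedAsymm8 alike.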
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index 9a4cf14..57204a0 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -58,8 +58,7 @@
         workloads/RefResizeBilinearFloat32Workload.cpp \
         workloads/RefResizeBilinearUint8Workload.cpp \
         workloads/RefRsqrtFloat32Workload.cpp \
-        workloads/RefSoftmaxFloat32Workload.cpp \
-        workloads/RefSoftmaxUint8Workload.cpp \
+        workloads/RefSoftmaxWorkload.cpp \
         workloads/RefSpaceToBatchNdWorkload.cpp \
         workloads/RefStridedSliceWorkload.cpp \
         workloads/RefSplitterFloat32Workload.cpp \
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index a96d656..2222a22 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -433,12 +433,12 @@
 
 BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload)
 {
-    RefCreateSoftmaxWorkloadTest<RefSoftmaxFloat32Workload, armnn::DataType::Float32>();
+    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSoftmaxUint8Workload)
+BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedAsymm8Workload)
 {
-    RefCreateSoftmaxWorkloadTest<RefSoftmaxUint8Workload, armnn::DataType::QuantisedAsymm8>();
+    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QuantisedAsymm8>();
 }
 
 template <typename SplitterWorkloadType, armnn::DataType DataType>
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 3db0314..e2f93d7 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -99,10 +99,8 @@
     RefResizeBilinearUint8Workload.hpp
     RefRsqrtFloat32Workload.cpp
     RefRsqrtFloat32Workload.hpp
-    RefSoftmaxFloat32Workload.cpp
-    RefSoftmaxFloat32Workload.hpp
-    RefSoftmaxUint8Workload.cpp
-    RefSoftmaxUint8Workload.hpp
+    RefSoftmaxWorkload.cpp
+    RefSoftmaxWorkload.hpp
     RefSpaceToBatchNdWorkload.cpp
     RefSpaceToBatchNdWorkload.hpp
     RefSplitterFloat32Workload.cpp
diff --git a/src/backends/reference/workloads/RefSoftmaxFloat32Workload.cpp b/src/backends/reference/workloads/RefSoftmaxFloat32Workload.cpp
deleted file mode 100644
index 1f519bd..0000000
--- a/src/backends/reference/workloads/RefSoftmaxFloat32Workload.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefSoftmaxFloat32Workload.hpp"
-
-#include "RefWorkloadUtils.hpp"
-#include "Softmax.hpp"
-
-#include "Profiling.hpp"
-
-namespace armnn
-{
-
-void RefSoftmaxFloat32Workload::Execute() const
-{
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSoftmaxFloat32Workload_Execute");
-
-    Softmax(GetInputTensorDataFloat(0, m_Data),
-            GetOutputTensorDataFloat(0, m_Data),
-            GetTensorInfo(m_Data.m_Inputs[0]),
-            m_Data.m_Parameters.m_Beta);
-}
-
-} //namespace armnn
diff --git a/src/backends/reference/workloads/RefSoftmaxFloat32Workload.hpp b/src/backends/reference/workloads/RefSoftmaxFloat32Workload.hpp
deleted file mode 100644
index 82ddfac..0000000
--- a/src/backends/reference/workloads/RefSoftmaxFloat32Workload.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <backendsCommon/Workload.hpp>
-#include <backendsCommon/WorkloadData.hpp>
-
-namespace armnn
-{
-
-class RefSoftmaxFloat32Workload : public Float32Workload<SoftmaxQueueDescriptor>
-{
-public:
-    using Float32Workload<SoftmaxQueueDescriptor>::Float32Workload;
-    virtual void Execute() const override;
-};
-
-} //namespace armnn
diff --git a/src/backends/reference/workloads/RefSoftmaxUint8Workload.cpp b/src/backends/reference/workloads/RefSoftmaxUint8Workload.cpp
deleted file mode 100644
index 17114ec..0000000
--- a/src/backends/reference/workloads/RefSoftmaxUint8Workload.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefSoftmaxUint8Workload.hpp"
-
-#include "RefWorkloadUtils.hpp"
-#include "Softmax.hpp"
-
-#include "Profiling.hpp"
-
-#include <vector>
-
-namespace armnn
-{
-
-void RefSoftmaxUint8Workload::Execute() const
-{
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSoftmaxUint8Workload_Execute");
-
-    const TensorInfo& tensorInfo = GetTensorInfo(m_Data.m_Inputs[0]);
-
-    auto dequant = Dequantize(GetInputTensorDataU8(0, m_Data), tensorInfo);
-
-    std::vector<float> results(tensorInfo.GetNumElements());
-
-    Softmax(dequant.data(),
-            results.data(),
-            tensorInfo,
-            m_Data.m_Parameters.m_Beta);
-
-    Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), GetTensorInfo(m_Data.m_Outputs[0]));
-}
-
-} //namespace armnn
diff --git a/src/backends/reference/workloads/RefSoftmaxWorkload.cpp b/src/backends/reference/workloads/RefSoftmaxWorkload.cpp
new file mode 100644
index 0000000..b176667
--- /dev/null
+++ b/src/backends/reference/workloads/RefSoftmaxWorkload.cpp
@@ -0,0 +1,39 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefSoftmaxWorkload.hpp"
+
+#include "Decoders.hpp"
+#include "Encoders.hpp"
+#include "RefWorkloadUtils.hpp"
+#include "Softmax.hpp"
+
+#include "Profiling.hpp"
+
+#include <vector>
+
+namespace armnn
+{
+
+void RefSoftmaxWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSoftmaxWorkload_Execute");
+
+    const TensorInfo& inputTensorInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+
+    std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputTensorInfo, m_Data.m_Inputs[0]->Map());
+    Decoder<float>& decoder = *decoderPtr;
+
+    const TensorInfo& outputTensorInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+    std::unique_ptr<Encoder<float>> encoderPtr = MakeEncoder<float>(outputTensorInfo, m_Data.m_Outputs[0]->Map());
+    Encoder<float>& encoder = *encoderPtr;
+
+    Softmax(decoder,
+            encoder,
+            inputTensorInfo,
+            m_Data.m_Parameters.m_Beta);
+}
+} //namespace armnn
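
The merged workload is type-agnostic because every element access goes through Decoder<float>/Encoder<float>: MakeDecoder and MakeEncoder inspect the TensorInfo's DataType and return an implementation that converts to and from float on each access. A simplified sketch of the decoder side (hypothetical stand-ins; the real abstract classes live in BaseIterator.hpp):

    #include <cstddef>
    #include <cstdint>

    // operator[] repositions the iterator; Get() decodes the element to float.
    struct FloatDecoderSketch
    {
        virtual ~FloatDecoderSketch() = default;
        virtual FloatDecoderSketch& operator[](std::size_t index) = 0;
        virtual float Get() const = 0;
    };

    // Float32 tensors decode as a passthrough read.
    struct Float32DecoderSketch final : FloatDecoderSketch
    {
        explicit Float32DecoderSketch(const float* data) : m_Data(data) {}
        Float32DecoderSketch& operator[](std::size_t i) override { m_Index = i; return *this; }
        float Get() const override { return m_Data[m_Index]; }
        const float* m_Data;
        std::size_t m_Index = 0;
    };

    // QAsymm8 tensors are dequantized on read: real = scale * (stored - offset).
    struct QAsymm8DecoderSketch final : FloatDecoderSketch
    {
        QAsymm8DecoderSketch(const std::uint8_t* data, float scale, std::int32_t offset)
            : m_Data(data), m_Scale(scale), m_Offset(offset) {}
        QAsymm8DecoderSketch& operator[](std::size_t i) override { m_Index = i; return *this; }
        float Get() const override
        {
            return m_Scale * (static_cast<std::int32_t>(m_Data[m_Index]) - m_Offset);
        }
        const std::uint8_t* m_Data;
        float m_Scale;
        std::int32_t m_Offset;
        std::size_t m_Index = 0;
    };

Encoder<float> mirrors this with a Set(float) that quantizes on write for QAsymm8 outputs, which is why the old RefSoftmaxUint8Workload's explicit Dequantize/Quantize round trip is no longer needed.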
diff --git a/src/backends/reference/workloads/RefSoftmaxUint8Workload.hpp b/src/backends/reference/workloads/RefSoftmaxWorkload.hpp
similarity index 67%
rename from src/backends/reference/workloads/RefSoftmaxUint8Workload.hpp
rename to src/backends/reference/workloads/RefSoftmaxWorkload.hpp
index bb7b214..cf3623b 100644
--- a/src/backends/reference/workloads/RefSoftmaxUint8Workload.hpp
+++ b/src/backends/reference/workloads/RefSoftmaxWorkload.hpp
@@ -11,10 +11,10 @@
 namespace armnn
 {
 
-class RefSoftmaxUint8Workload : public Uint8Workload<SoftmaxQueueDescriptor>
+class RefSoftmaxWorkload : public BaseWorkload<SoftmaxQueueDescriptor>
 {
 public:
-    using Uint8Workload<SoftmaxQueueDescriptor>::Uint8Workload;
+    using BaseWorkload<SoftmaxQueueDescriptor>::BaseWorkload;
     virtual void Execute() const override;
 };
 
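
The base-class change is what allows one class to serve both data types: Float32Workload and Uint8Workload are TypedWorkload aliases that validate every input and output tensor against a single DataType at construction, whereas BaseWorkload performs no such check. A loose, hypothetical sketch of the difference:

    #include <cassert>
    #include <vector>

    enum class DataType { Float32, QuantisedAsymm8 };

    template <typename Descriptor>
    struct BaseWorkloadSketch // accepts any tensor data type
    {
        explicit BaseWorkloadSketch(const Descriptor& desc) : m_Data(desc) {}
        Descriptor m_Data;
    };

    template <typename Descriptor, DataType Expected>
    struct TypedWorkloadSketch : BaseWorkloadSketch<Descriptor>
    {
        TypedWorkloadSketch(const Descriptor& desc, const std::vector<DataType>& tensorTypes)
            : BaseWorkloadSketch<Descriptor>(desc)
        {
            for (DataType t : tensorTypes)
            {
                assert(t == Expected); // loosely mirrors TypedWorkload's data-type check
            }
        }
    };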
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 6ffec2b..ab3da88 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -27,8 +27,7 @@
 #include "FullyConnected.hpp"
 #include "Gather.hpp"
 #include "RefFloorFloat32Workload.hpp"
-#include "RefSoftmaxFloat32Workload.hpp"
-#include "RefSoftmaxUint8Workload.hpp"
+#include "RefSoftmaxWorkload.hpp"
 #include "RefResizeBilinearFloat32Workload.hpp"
 #include "RefBatchNormalizationUint8Workload.hpp"
 #include "ResizeBilinear.hpp"
diff --git a/src/backends/reference/workloads/Softmax.cpp b/src/backends/reference/workloads/Softmax.cpp
index 4f1016e..6cb219a 100644
--- a/src/backends/reference/workloads/Softmax.cpp
+++ b/src/backends/reference/workloads/Softmax.cpp
@@ -12,16 +12,19 @@
 {
 
-/// Computes the softmax function on some inputs, into outputs, with a shape given by tensorInfo.
-void Softmax(const float* in, float* out, const TensorInfo& tensorInfo, float beta)
+/// Computes the softmax function on some inputs, into outputs, with a shape given by inputTensorInfo.
+void Softmax(Decoder<float>& in, Encoder<float>& out, const TensorInfo& inputTensorInfo, float beta)
 {
-    unsigned int numChannels = tensorInfo.GetShape()[1];
-    for (unsigned int n = 0; n < tensorInfo.GetShape()[0]; n++)
+    unsigned int numChannels = inputTensorInfo.GetShape()[1];
+
+    for (unsigned int n = 0; n < inputTensorInfo.GetShape()[0]; n++)
     {
         // Find maximum channel.
-        float max = in[n * numChannels];
+        in[n * numChannels]; // operator[] only repositions the decoder iterator
+        float max = in.Get(); // Get() decodes the element at that position to float
         for (unsigned int c = 1; c < numChannels; c++)
         {
-            float val = in[n * numChannels + c];
+            in[n * numChannels + c];
+            float val = in.Get();
             if (val > max)
             {
                 max = val;
@@ -33,7 +36,8 @@
         float              sum = 0.0f;
         for (unsigned int c = 0; c < numChannels; c++)
         {
-            float val       = in[n * numChannels + c];
+            in[n * numChannels + c];
+            float val = in.Get();
             exponentials[c] = expf((val - max) * beta);
             sum += exponentials[c];
         }
@@ -41,7 +45,8 @@
         // Divide exponentials by sum to give outputs.
         for (unsigned int c = 0; c < numChannels; c++)
         {
-            out[n * numChannels + c] = exponentials[c] / sum;
+            out[n * numChannels + c]; // reposition the encoder iterator
+            out.Set(exponentials[c] / sum); // Set() encodes the float result (quantizing if needed)
         }
     }
 }
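
The arithmetic itself is untouched by the iterator rewrite. A standalone sanity check of one batch with plain floats (arbitrary values, beta = 1):

    #include <cmath>
    #include <cstdio>

    int main()
    {
        const float in[3] = {1.0f, 2.0f, 3.0f};
        const float beta  = 1.0f;

        float max = in[0];
        for (int c = 1; c < 3; ++c) { if (in[c] > max) { max = in[c]; } }

        float exps[3];
        float sum = 0.0f;
        for (int c = 0; c < 3; ++c)
        {
            exps[c] = std::exp((in[c] - max) * beta); // shift by max for stability
            sum += exps[c];
        }

        for (int c = 0; c < 3; ++c)
        {
            std::printf("%.4f\n", exps[c] / sum); // 0.0900, 0.2447, 0.6652
        }
    }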
diff --git a/src/backends/reference/workloads/Softmax.hpp b/src/backends/reference/workloads/Softmax.hpp
index 3b974f9..3876293 100644
--- a/src/backends/reference/workloads/Softmax.hpp
+++ b/src/backends/reference/workloads/Softmax.hpp
@@ -5,12 +5,13 @@
 
 #pragma once
 
+#include "BaseIterator.hpp"
 #include <armnn/Tensor.hpp>
 
 namespace armnn
 {
 
-/// Computes the softmax function on some inputs, into outputs, with a shape given by tensorInfo.
-void Softmax(const float* in, float* out, const TensorInfo& tensorInfo, float beta);
+/// Computes the softmax function on some inputs, into outputs, with a shape given by inputTensorInfo.
+void Softmax(Decoder<float>& in, Encoder<float>& out, const TensorInfo& inputTensorInfo, float beta);
 
 } //namespace armnn