IVGCVSW-3168 Refactor reference softmax workloads into a single workload
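
This merges RefSoftmaxFloat32Workload and RefSoftmaxUint8Workload into a
single RefSoftmaxWorkload. Instead of reading raw float or uint8 buffers
(and dequantizing/quantizing around the float Softmax), the workload now
builds a Decoder<float> for the input and an Encoder<float> for the output
via MakeDecoder/MakeEncoder, and Softmax() works entirely through those
iterators, so one code path covers both data types.

Below is a minimal, hypothetical sketch (not ArmNN code) of the access
pattern Softmax() now relies on; FloatDecoder/FloatEncoder are illustrative
stand-ins for what MakeDecoder<float>/MakeEncoder<float> return, and the
buffer contents in main() are made up:

    // Hypothetical, self-contained sketch of the element access pattern used by
    // the refactored Softmax(): operator[] positions an iterator on an element,
    // Get() reads it as float, Set() writes it.
    #include <cmath>
    #include <cstdio>
    #include <vector>

    struct FloatDecoder
    {
        const float* data;
        unsigned int index;
        FloatDecoder& operator[](unsigned int i) { index = i; return *this; }
        float Get() const { return data[index]; }
    };

    struct FloatEncoder
    {
        float* data;
        unsigned int index;
        FloatEncoder& operator[](unsigned int i) { index = i; return *this; }
        void Set(float value) { data[index] = value; }
    };

    // Same control flow as the refactored Softmax(): position, then Get()/Set().
    void SoftmaxSketch(FloatDecoder& in, FloatEncoder& out,
                       unsigned int numBatches, unsigned int numChannels, float beta)
    {
        for (unsigned int n = 0; n < numBatches; n++)
        {
            in[n * numChannels];          // position the decoder on the first channel
            float max = in.Get();
            for (unsigned int c = 1; c < numChannels; c++)
            {
                in[n * numChannels + c];
                float val = in.Get();
                if (val > max)
                {
                    max = val;
                }
            }

            std::vector<float> exponentials(numChannels);
            float sum = 0.0f;
            for (unsigned int c = 0; c < numChannels; c++)
            {
                in[n * numChannels + c];
                exponentials[c] = std::exp((in.Get() - max) * beta);
                sum += exponentials[c];
            }

            for (unsigned int c = 0; c < numChannels; c++)
            {
                out[n * numChannels + c]; // position the encoder
                out.Set(exponentials[c] / sum);
            }
        }
    }

    int main()
    {
        std::vector<float> input = { 1.0f, 2.0f, 3.0f, 1.0f, 1.0f, 1.0f }; // 2 batches x 3 channels
        std::vector<float> output(input.size());
        FloatDecoder in{ input.data(), 0 };
        FloatEncoder out{ output.data(), 0 };
        SoftmaxSketch(in, out, 2, 3, 1.0f);
        for (float v : output)
        {
            std::printf("%f\n", v);
        }
        return 0;
    }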

Change-Id: Ie290efcbb9e3a6365cbd630cb2041e7b0f542505
Signed-off-by: nikraj01 <nikhil.raj@arm.com>
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 3db0314..e2f93d7 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -99,10 +99,8 @@
     RefResizeBilinearUint8Workload.hpp
     RefRsqrtFloat32Workload.cpp
     RefRsqrtFloat32Workload.hpp
-    RefSoftmaxFloat32Workload.cpp
-    RefSoftmaxFloat32Workload.hpp
-    RefSoftmaxUint8Workload.cpp
-    RefSoftmaxUint8Workload.hpp
+    RefSoftmaxWorkload.cpp
+    RefSoftmaxWorkload.hpp
     RefSpaceToBatchNdWorkload.cpp
     RefSpaceToBatchNdWorkload.hpp
     RefSplitterFloat32Workload.cpp
diff --git a/src/backends/reference/workloads/RefSoftmaxFloat32Workload.cpp b/src/backends/reference/workloads/RefSoftmaxFloat32Workload.cpp
deleted file mode 100644
index 1f519bd..0000000
--- a/src/backends/reference/workloads/RefSoftmaxFloat32Workload.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefSoftmaxFloat32Workload.hpp"
-
-#include "RefWorkloadUtils.hpp"
-#include "Softmax.hpp"
-
-#include "Profiling.hpp"
-
-namespace armnn
-{
-
-void RefSoftmaxFloat32Workload::Execute() const
-{
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSoftmaxFloat32Workload_Execute");
-
-    Softmax(GetInputTensorDataFloat(0, m_Data),
-            GetOutputTensorDataFloat(0, m_Data),
-            GetTensorInfo(m_Data.m_Inputs[0]),
-            m_Data.m_Parameters.m_Beta);
-}
-
-} //namespace armnn
diff --git a/src/backends/reference/workloads/RefSoftmaxFloat32Workload.hpp b/src/backends/reference/workloads/RefSoftmaxFloat32Workload.hpp
deleted file mode 100644
index 82ddfac..0000000
--- a/src/backends/reference/workloads/RefSoftmaxFloat32Workload.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <backendsCommon/Workload.hpp>
-#include <backendsCommon/WorkloadData.hpp>
-
-namespace armnn
-{
-
-class RefSoftmaxFloat32Workload : public Float32Workload<SoftmaxQueueDescriptor>
-{
-public:
-    using Float32Workload<SoftmaxQueueDescriptor>::Float32Workload;
-    virtual void Execute() const override;
-};
-
-} //namespace armnn
diff --git a/src/backends/reference/workloads/RefSoftmaxUint8Workload.cpp b/src/backends/reference/workloads/RefSoftmaxUint8Workload.cpp
deleted file mode 100644
index 17114ec..0000000
--- a/src/backends/reference/workloads/RefSoftmaxUint8Workload.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefSoftmaxUint8Workload.hpp"
-
-#include "RefWorkloadUtils.hpp"
-#include "Softmax.hpp"
-
-#include "Profiling.hpp"
-
-#include <vector>
-
-namespace armnn
-{
-
-void RefSoftmaxUint8Workload::Execute() const
-{
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSoftmaxUint8Workload_Execute");
-
-    const TensorInfo& tensorInfo = GetTensorInfo(m_Data.m_Inputs[0]);
-
-    auto dequant = Dequantize(GetInputTensorDataU8(0, m_Data), tensorInfo);
-
-    std::vector<float> results(tensorInfo.GetNumElements());
-
-    Softmax(dequant.data(),
-            results.data(),
-            tensorInfo,
-            m_Data.m_Parameters.m_Beta);
-
-    Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), GetTensorInfo(m_Data.m_Outputs[0]));
-}
-
-} //namespace armnn
diff --git a/src/backends/reference/workloads/RefSoftmaxWorkload.cpp b/src/backends/reference/workloads/RefSoftmaxWorkload.cpp
new file mode 100644
index 0000000..b176667
--- /dev/null
+++ b/src/backends/reference/workloads/RefSoftmaxWorkload.cpp
@@ -0,0 +1,39 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefSoftmaxWorkload.hpp"
+
+#include "Decoders.hpp"
+#include "Encoders.hpp"
+#include "RefWorkloadUtils.hpp"
+#include "Softmax.hpp"
+
+#include "Profiling.hpp"
+
+#include <vector>
+
+namespace armnn
+{
+
+void RefSoftmaxWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSoftmaxWorkload_Execute");
+
+    const TensorInfo& inputTensorInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+
+    std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputTensorInfo, m_Data.m_Inputs[0]->Map());
+    Decoder<float>& decoder = *decoderPtr;
+
+    const TensorInfo& outputTensorInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+    std::unique_ptr<Encoder<float>> encoderPtr = MakeEncoder<float>(outputTensorInfo, m_Data.m_Outputs[0]->Map());
+    Encoder<float>& encoder = *encoderPtr;
+
+    Softmax(decoder,
+            encoder,
+            inputTensorInfo,
+            m_Data.m_Parameters.m_Beta);
+}
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefSoftmaxUint8Workload.hpp b/src/backends/reference/workloads/RefSoftmaxWorkload.hpp
similarity index 67%
rename from src/backends/reference/workloads/RefSoftmaxUint8Workload.hpp
rename to src/backends/reference/workloads/RefSoftmaxWorkload.hpp
index bb7b214..cf3623b 100644
--- a/src/backends/reference/workloads/RefSoftmaxUint8Workload.hpp
+++ b/src/backends/reference/workloads/RefSoftmaxWorkload.hpp
@@ -11,10 +11,10 @@
 namespace armnn
 {
 
-class RefSoftmaxUint8Workload : public Uint8Workload<SoftmaxQueueDescriptor>
+class RefSoftmaxWorkload : public BaseWorkload<SoftmaxQueueDescriptor>
 {
 public:
-    using Uint8Workload<SoftmaxQueueDescriptor>::Uint8Workload;
+    using BaseWorkload<SoftmaxQueueDescriptor>::BaseWorkload;
     virtual void Execute() const override;
 };
 
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 6ffec2b..ab3da88 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -27,8 +27,7 @@
 #include "FullyConnected.hpp"
 #include "Gather.hpp"
 #include "RefFloorFloat32Workload.hpp"
-#include "RefSoftmaxFloat32Workload.hpp"
-#include "RefSoftmaxUint8Workload.hpp"
+#include "RefSoftmaxWorkload.hpp"
 #include "RefResizeBilinearFloat32Workload.hpp"
 #include "RefBatchNormalizationUint8Workload.hpp"
 #include "ResizeBilinear.hpp"
diff --git a/src/backends/reference/workloads/Softmax.cpp b/src/backends/reference/workloads/Softmax.cpp
index 4f1016e..6cb219a 100644
--- a/src/backends/reference/workloads/Softmax.cpp
+++ b/src/backends/reference/workloads/Softmax.cpp
@@ -12,16 +12,19 @@
 {
 
 /// Computes the softmax function on some inputs, into outputs, with a shape given by tensorInfo.
-void Softmax(const float* in, float* out, const TensorInfo& tensorInfo, float beta)
+void Softmax(Decoder<float>& in, Encoder<float>& out, const TensorInfo& inputTensorInfo, float beta)
 {
-    unsigned int numChannels = tensorInfo.GetShape()[1];
-    for (unsigned int n = 0; n < tensorInfo.GetShape()[0]; n++)
+    unsigned int numChannels = inputTensorInfo.GetShape()[1];
+
+    for (unsigned int n = 0; n < inputTensorInfo.GetShape()[0]; n++)
     {
         // Find maximum channel.
-        float max = in[n * numChannels];
+        in[n * numChannels]; // operator[] positions the decoder; Get() reads that element
+        float max = in.Get();
         for (unsigned int c = 1; c < numChannels; c++)
         {
-            float val = in[n * numChannels + c];
+            in[n * numChannels + c];
+            float val = in.Get();
             if (val > max)
             {
                 max = val;
@@ -33,7 +36,8 @@
         float              sum = 0.0f;
         for (unsigned int c = 0; c < numChannels; c++)
         {
-            float val       = in[n * numChannels + c];
+            in[n * numChannels + c];
+            float val = in.Get();
             exponentials[c] = expf((val - max) * beta);
             sum += exponentials[c];
         }
@@ -41,7 +45,8 @@
         // Divide exponentials by sum to give outputs.
         for (unsigned int c = 0; c < numChannels; c++)
         {
-            out[n * numChannels + c] = exponentials[c] / sum;
+            out[n * numChannels + c]; // operator[] positions the encoder; Set() writes it
+            out.Set(exponentials[c] / sum);
         }
     }
 }
diff --git a/src/backends/reference/workloads/Softmax.hpp b/src/backends/reference/workloads/Softmax.hpp
index 3b974f9..3876293 100644
--- a/src/backends/reference/workloads/Softmax.hpp
+++ b/src/backends/reference/workloads/Softmax.hpp
@@ -5,12 +5,13 @@
 
 #pragma once
 
+#include "BaseIterator.hpp"
 #include <armnn/Tensor.hpp>
 
 namespace armnn
 {
 
 /// Computes the softmax function on some inputs, into outputs, with a shape given by tensorInfo.
-void Softmax(const float* in, float* out, const TensorInfo& tensorInfo, float beta);
+void Softmax(Decoder<float>& in, Encoder<float>& out, const TensorInfo& inputTensorInfo, float beta);
 
 } //namespace armnn