IVGCVSW-3218 Refactor the Reference Workloads for the ResizeBilinear layer

 * Refactored ResizeBilinear Reference Workloads to combine Float32 and Uint8 files

Signed-off-by: Ellen Norris-Thompson <ellen.norris-thompson@arm.com>
Change-Id: I725a830f4c4755a7d3a37ca68e31e44e7eb267cb
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index a2d8640..728e605 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -242,7 +242,11 @@
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
                                                                     const WorkloadInfo& info) const
 {
-    return MakeWorkload<RefResizeBilinearFloat32Workload, RefResizeBilinearUint8Workload>(descriptor, info);
+    if (IsFloat16(info))
+    {
+        return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+    }
+    return std::make_unique<RefResizeBilinearWorkload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFakeQuantization(
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index 189f692..c4a0c76 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -53,8 +53,7 @@
         workloads/RefPooling2dWorkload.cpp \
         workloads/RefQuantizeWorkload.cpp \
         workloads/RefReshapeWorkload.cpp \
-        workloads/RefResizeBilinearFloat32Workload.cpp \
-        workloads/RefResizeBilinearUint8Workload.cpp \
+        workloads/RefResizeBilinearWorkload.cpp \
         workloads/RefRsqrtWorkload.cpp \
         workloads/RefSoftmaxWorkload.cpp \
         workloads/RefSpaceToBatchNdWorkload.cpp \
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index ef8ff9e..7816d5b 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -643,17 +643,17 @@
 
 BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32)
 {
-    RefCreateResizeBilinearTest<RefResizeBilinearFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
+    RefCreateResizeBilinearTest<RefResizeBilinearWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
 {
-    RefCreateResizeBilinearTest<RefResizeBilinearUint8Workload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+    RefCreateResizeBilinearTest<RefResizeBilinearWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc)
 {
-    RefCreateResizeBilinearTest<RefResizeBilinearFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
+    RefCreateResizeBilinearTest<RefResizeBilinearWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
 }
 
 template <typename RsqrtWorkloadType, armnn::DataType DataType>
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 41a5534..ebd3390 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -88,10 +88,8 @@
     RefQuantizeWorkload.hpp
     RefReshapeWorkload.cpp
     RefReshapeWorkload.hpp
-    RefResizeBilinearFloat32Workload.cpp
-    RefResizeBilinearFloat32Workload.hpp
-    RefResizeBilinearUint8Workload.cpp
-    RefResizeBilinearUint8Workload.hpp
+    RefResizeBilinearWorkload.cpp
+    RefResizeBilinearWorkload.hpp
     RefRsqrtWorkload.cpp
     RefRsqrtWorkload.hpp
     RefSoftmaxWorkload.cpp
diff --git a/src/backends/reference/workloads/RefResizeBilinearFloat32Workload.cpp b/src/backends/reference/workloads/RefResizeBilinearFloat32Workload.cpp
deleted file mode 100644
index 8d86bdc..0000000
--- a/src/backends/reference/workloads/RefResizeBilinearFloat32Workload.cpp
+++ /dev/null
@@ -1,30 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefResizeBilinearFloat32Workload.hpp"
-
-#include "RefWorkloadUtils.hpp"
-#include "ResizeBilinear.hpp"
-
-#include "Profiling.hpp"
-
-namespace armnn
-{
-
-void RefResizeBilinearFloat32Workload::Execute() const
-{
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefResizeBilinearFloat32Workload_Execute");
-
-    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
-    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
-
-    ResizeBilinear(GetInputTensorDataFloat(0, m_Data),
-        inputInfo,
-        GetOutputTensorDataFloat(0, m_Data),
-        outputInfo,
-        m_Data.m_Parameters.m_DataLayout);
-}
-
-} //namespace armnn
diff --git a/src/backends/reference/workloads/RefResizeBilinearFloat32Workload.hpp b/src/backends/reference/workloads/RefResizeBilinearFloat32Workload.hpp
deleted file mode 100644
index 84d3a51..0000000
--- a/src/backends/reference/workloads/RefResizeBilinearFloat32Workload.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <backendsCommon/Workload.hpp>
-#include <backendsCommon/WorkloadData.hpp>
-
-namespace armnn
-{
-
-class RefResizeBilinearFloat32Workload : public Float32Workload<ResizeBilinearQueueDescriptor>
-{
-public:
-    using Float32Workload<ResizeBilinearQueueDescriptor>::Float32Workload;
-    virtual void Execute() const override;
-};
-
-} //namespace armnn
diff --git a/src/backends/reference/workloads/RefResizeBilinearUint8Workload.cpp b/src/backends/reference/workloads/RefResizeBilinearUint8Workload.cpp
deleted file mode 100644
index 3a6793c..0000000
--- a/src/backends/reference/workloads/RefResizeBilinearUint8Workload.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefResizeBilinearUint8Workload.hpp"
-
-#include "RefWorkloadUtils.hpp"
-#include "ResizeBilinear.hpp"
-
-#include "Profiling.hpp"
-
-#include <vector>
-
-namespace armnn
-{
-
-void RefResizeBilinearUint8Workload::Execute() const
-{
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefResizeBilinearUint8Workload_Execute");
-
-    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
-    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
-
-    auto dequant = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo);
-
-    std::vector<float> results(outputInfo.GetNumElements());
-    ResizeBilinear(dequant.data(), inputInfo, results.data(), outputInfo, m_Data.m_Parameters.m_DataLayout);
-
-    Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo);
-}
-
-} //namespace armnn
diff --git a/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp b/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp
new file mode 100644
index 0000000..03fcec2
--- /dev/null
+++ b/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp
@@ -0,0 +1,34 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefResizeBilinearWorkload.hpp"
+
+#include "RefWorkloadUtils.hpp"
+#include "ResizeBilinear.hpp"
+#include "BaseIterator.hpp"
+#include "Profiling.hpp"
+
+#include "Decoders.hpp"
+#include "Encoders.hpp"
+
+namespace armnn
+{
+
+// Type-agnostic ResizeBilinear: Decoder/Encoder handle Float32 and
+// QuantisedAsymm8 (de)quantization, replacing the separate per-type workloads.
+void RefResizeBilinearWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefResizeBilinearWorkload_Execute");
+
+    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+    std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
+    Decoder<float> &decoder = *decoderPtr;
+    std::unique_ptr<Encoder<float>> encoderPtr = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
+    Encoder<float> &encoder = *encoderPtr;
+
+    ResizeBilinear(decoder, inputInfo, encoder, outputInfo, m_Data.m_Parameters.m_DataLayout);
+}
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefResizeBilinearUint8Workload.hpp b/src/backends/reference/workloads/RefResizeBilinearWorkload.hpp
similarity index 63%
rename from src/backends/reference/workloads/RefResizeBilinearUint8Workload.hpp
rename to src/backends/reference/workloads/RefResizeBilinearWorkload.hpp
index 6380441..8f86f6f 100644
--- a/src/backends/reference/workloads/RefResizeBilinearUint8Workload.hpp
+++ b/src/backends/reference/workloads/RefResizeBilinearWorkload.hpp
@@ -11,10 +11,10 @@
 namespace armnn
 {
 
-class RefResizeBilinearUint8Workload : public Uint8Workload<ResizeBilinearQueueDescriptor>
+class RefResizeBilinearWorkload : public BaseWorkload<ResizeBilinearQueueDescriptor>
 {
 public:
-    using Uint8Workload<ResizeBilinearQueueDescriptor>::Uint8Workload;
+    using BaseWorkload<ResizeBilinearQueueDescriptor>::BaseWorkload;
     virtual void Execute() const override;
 };
 
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 1a2dec4..7cfced4 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -11,7 +11,7 @@
 #include "RefConstantWorkload.hpp"
 #include "RefConvolution2dWorkload.hpp"
 #include "RefSplitterWorkload.hpp"
-#include "RefResizeBilinearUint8Workload.hpp"
+#include "RefResizeBilinearWorkload.hpp"
 #include "RefL2NormalizationWorkload.hpp"
 #include "RefActivationWorkload.hpp"
 #include "RefPooling2dWorkload.hpp"
@@ -28,7 +28,6 @@
 #include "Gather.hpp"
 #include "RefFloorWorkload.hpp"
 #include "RefSoftmaxWorkload.hpp"
-#include "RefResizeBilinearFloat32Workload.hpp"
 #include "ResizeBilinear.hpp"
 #include "RefNormalizationWorkload.hpp"
 #include "RefDetectionPostProcessWorkload.hpp"
diff --git a/src/backends/reference/workloads/ResizeBilinear.cpp b/src/backends/reference/workloads/ResizeBilinear.cpp
index 2d1087c..70a0514 100644
--- a/src/backends/reference/workloads/ResizeBilinear.cpp
+++ b/src/backends/reference/workloads/ResizeBilinear.cpp
@@ -27,9 +27,9 @@
 
 }
 
-void ResizeBilinear(const float*      in,
+void ResizeBilinear(Decoder<float>&   in,
                     const TensorInfo& inputInfo,
-                    float*            out,
+                    Encoder<float>&   out,
                     const TensorInfo& outputInfo,
                     DataLayoutIndexed dataLayout)
 {
@@ -50,8 +50,8 @@
     const float scaleY = boost::numeric_cast<float>(inputHeight) / boost::numeric_cast<float>(outputHeight);
     const float scaleX = boost::numeric_cast<float>(inputWidth) / boost::numeric_cast<float>(outputWidth);
 
-    TensorBufferArrayView<const float> input(inputInfo.GetShape(), in, dataLayout);
-    TensorBufferArrayView<float> output(outputInfo.GetShape(), out, dataLayout);
+    TensorShape inputShape =  inputInfo.GetShape();
+    TensorShape outputShape =  outputInfo.GetShape();
 
     for (unsigned int n = 0; n < batchSize; ++n)
     {
@@ -84,11 +84,21 @@
                     const unsigned int y1 = std::min(y0 + 1, inputHeight - 1u);
 
                     // Interpolation
-                    const float ly0 = Lerp(input.Get(n, c, y0, x0), input.Get(n, c, y0, x1), xw); // lerp along row y0.
-                    const float ly1 = Lerp(input.Get(n, c, y1, x0), input.Get(n, c, y1, x1), xw); // lerp along row y1.
+                    in[dataLayout.GetIndex(inputShape, n, c, y0, x0)];
+                    float input1 = in.Get();
+                    in[dataLayout.GetIndex(inputShape, n, c, y0, x1)];
+                    float input2 = in.Get();
+                    in[dataLayout.GetIndex(inputShape, n, c, y1, x0)];
+                    float input3 = in.Get();
+                    in[dataLayout.GetIndex(inputShape, n, c, y1, x1)];
+                    float input4 = in.Get();
+
+                    const float ly0 = Lerp(input1, input2, xw); // lerp along row y0.
+                    const float ly1 = Lerp(input3, input4, xw); // lerp along row y1.
                     const float l = Lerp(ly0, ly1, yw);
 
-                    output.Get(n, c, y, x) = l;
+                    out[dataLayout.GetIndex(outputShape, n, c, y, x)];
+                    out.Set(l);
                 }
             }
         }
diff --git a/src/backends/reference/workloads/ResizeBilinear.hpp b/src/backends/reference/workloads/ResizeBilinear.hpp
index 814a0f2..ad2e487 100644
--- a/src/backends/reference/workloads/ResizeBilinear.hpp
+++ b/src/backends/reference/workloads/ResizeBilinear.hpp
@@ -5,6 +5,7 @@
 
 #pragma once
 
+#include "BaseIterator.hpp"
 #include <armnn/Tensor.hpp>
 
 #include <DataLayoutIndexed.hpp>
@@ -12,9 +13,9 @@
 namespace armnn
 {
 
-void ResizeBilinear(const float*                  in,
+void ResizeBilinear(Decoder<float>&               in,
                     const TensorInfo&             inputInfo,
-                    float*                        out,
+                    Encoder<float>&               out,
                     const TensorInfo&             outputInfo,
                     armnnUtils::DataLayoutIndexed dataLayout = DataLayout::NCHW);