IVGCVSW-3221 Refactor Mean ref workload and tests

 * Renamed RefMeanFloat32Workload and RefMeanUint8Workload
   to RefMeanWorkload, updated references to reflect this
   change.
 * Refactored RefMeanWorkload to use Decoders/Encoders,
   to support the use of multiple data types (see the first
   sketch after this list).
 * Deleted the reference Uint8 Mean tests as they were
   duplicates of the Float32 tests. Refactored these tests
   to support multiple data types and updated references.
 * Adjusted the values used in the tests' input tensors so
   that they are more representative of floating point
   numbers, e.g. changed 1.0f to 1.5f.
 * Replaced size_t with unsigned int in the Mean ref
   workload, for better compatibility with the
   Encoder/Decoder, and removed some now-unnecessary casts.
 * Added a ValidateTensorDataTypesMatch() function to
   WorkloadData.cpp and a CreateIncorrectDimensionsErrorMsg()
   function to RefLayerSupport.cpp.
 * Added passing and failing tests for ref IsMeanSupported
   (see the second sketch after this list).
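
For context, a minimal sketch of the Encoder/Decoder pattern the Mean
workload now uses. It is illustrative only: an element-wise pass-through
assuming matching input/output shapes, built from the MakeDecoder/MakeEncoder
factories and the operator[]/Get/Set calls visible in the diff below; the
helper name CopyViaEncoderDecoder is invented for the example.

    #include "Decoders.hpp"
    #include "Encoders.hpp"

    #include <armnn/Tensor.hpp>

    namespace armnn
    {

    // Illustrative: copy one tensor to another through the type-erased
    // Decoder/Encoder abstraction. MakeDecoder/MakeEncoder pick the
    // implementation for each tensor's DataType, so the same loop serves
    // Float32 and quantised Uint8 data (dequantising/quantising on the fly).
    void CopyViaEncoderDecoder(const TensorInfo& inputInfo,
                               const TensorInfo& outputInfo,
                               const void* inputData,
                               void* outputData)
    {
        auto decoder = MakeDecoder<float>(inputInfo, inputData);
        auto encoder = MakeEncoder<float>(outputInfo, outputData);

        for (unsigned int i = 0; i < outputInfo.GetNumElements(); ++i)
        {
            (*decoder)[i];                // position the decoder at element i
            (*encoder)[i];                // position the encoder at element i
            encoder->Set(decoder->Get()); // read as float, write back (re-quantising if needed)
        }
    }

    } // namespace armnn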
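
As a rough illustration of the new IsMeanSupported tests, a hedged sketch of
one passing and one failing check against RefLayerSupport. The test name,
include paths and the exact shapes are assumptions made for the example; the
committed tests, and the dimension validation they exercise, sit outside the
part of the diff shown here.

    #include <armnn/Descriptors.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>

    #include <reference/RefLayerSupport.hpp> // include path depends on the build setup

    #include <boost/test/unit_test.hpp>

    BOOST_AUTO_TEST_CASE(RefIsMeanSupportedSketch)
    {
        using namespace armnn;

        RefLayerSupport supportChecker;
        std::string reason;

        MeanDescriptor descriptor;
        descriptor.m_Axis     = {2};   // reduce over the last dimension
        descriptor.m_KeepDims = false;

        TensorInfo input(TensorShape({1, 3, 2}), DataType::Float32);

        // Passing case: output shape matches the input with axis 2 reduced away.
        TensorInfo validOutput(TensorShape({1, 3}), DataType::Float32);
        BOOST_CHECK(supportChecker.IsMeanSupported(input, validOutput, descriptor,
                                                   Optional<std::string&>(reason)));

        // Failing case: output rank does not reflect the reduction, so the
        // dimension checks in RefLayerSupport should reject it.
        TensorInfo invalidOutput(TensorShape({1, 3, 2}), DataType::Float32);
        BOOST_CHECK(!supportChecker.IsMeanSupported(input, invalidOutput, descriptor,
                                                    Optional<std::string&>(reason)));
    }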

Signed-off-by: James Conroy <james.conroy@arm.com>
Change-Id: Id3d44463d1385255c727a497d4026d21a49e7eb2
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index ebd3390..1ab38cc 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -119,10 +119,8 @@
     TensorBufferArrayView.hpp
     Mean.cpp
     Mean.hpp
-    RefMeanFloat32Workload.cpp
-    RefMeanFloat32Workload.hpp
-    RefMeanUint8Workload.cpp
-    RefMeanUint8Workload.hpp
+    RefMeanWorkload.cpp
+    RefMeanWorkload.hpp
 )
 
 add_library(armnnRefBackendWorkloads OBJECT ${armnnRefBackendWorkloads_sources})
diff --git a/src/backends/reference/workloads/Mean.cpp b/src/backends/reference/workloads/Mean.cpp
index 530aade..3ac3af9 100644
--- a/src/backends/reference/workloads/Mean.cpp
+++ b/src/backends/reference/workloads/Mean.cpp
@@ -36,10 +36,13 @@
     return (carry == 0);
 }
 
-std::size_t ReducedOutputOffset(const unsigned int numDims, const armnn::TensorShape& dims,
-                                std::vector<unsigned int>& index, const unsigned int numAxis,
-                                const std::vector<unsigned int>& axis) {
-    std::size_t offset = 0;
+unsigned int ReducedOutputOffset(const unsigned int numDims,
+                                 const armnn::TensorShape& dims,
+                                 std::vector<unsigned int>& index,
+                                 const unsigned int numAxis,
+                                 const std::vector<unsigned int>& axis)
+{
+    unsigned int offset = 0;
     for (unsigned int idx = 0; idx < numDims; ++idx)
     {
         bool isAxis = false;
@@ -56,7 +59,7 @@
         }
         if (!isAxis)
         {
-            offset = offset * boost::numeric_cast<size_t>(dims[idx]) + boost::numeric_cast<size_t>(index[idx]);
+            offset = offset * dims[idx] + index[idx];
         }
     }
     return offset;
@@ -68,8 +71,9 @@
 void Mean(const armnn::TensorInfo& inputInfo,
           const armnn::TensorInfo& outputInfo,
           const std::vector<unsigned int>& axis,
-          const float* inputData,
-          float* outputData) {
+          Decoder<float>& input,
+          Encoder<float>& output)
+{
 
     unsigned int inputNumDims = inputInfo.GetNumDimensions();
     unsigned int outputNumDims = outputInfo.GetNumDimensions();
@@ -78,16 +82,17 @@
     armnn::TensorShape inputDims = inputInfo.GetShape();
 
     // Initialise output data.
-    size_t numOutputs = 1;
+    unsigned int numOutputs = 1;
     for (unsigned int idx = 0; idx < outputNumDims; ++idx)
     {
-        numOutputs *= boost::numeric_cast<size_t>(outputDims[idx]);
+        numOutputs *= outputDims[idx];
     }
 
     std::vector<float> tempSum(numOutputs);
-    for (size_t idx = 0; idx < numOutputs; ++idx)
+    for (unsigned int idx = 0; idx < numOutputs; ++idx)
     {
-        outputData[idx] = 0.0f;
+        output[idx];
+        output.Set(0.0f);
         tempSum[idx] = 0.0f;
     }
 
@@ -106,30 +111,32 @@
           resolvedAxis.push_back(idx);
       }
     }
-    unsigned int numResolvedAxis = boost::numeric_cast<unsigned int>(resolvedAxis.size());
+    auto numResolvedAxis = boost::numeric_cast<unsigned int>(resolvedAxis.size());
 
     // Iterates through input_data and sum up the reduced axis.
     for (bool hasNext = true; hasNext; hasNext = NextIndex(inputNumDims, inputDims, tempIndex))
     {
-        size_t inputOffset = ReducedOutputOffset(inputNumDims, inputDims, tempIndex, 0, {});
-        size_t outputOffset = ReducedOutputOffset(inputNumDims, inputDims, tempIndex,
-                                                  numResolvedAxis, resolvedAxis);
-        tempSum[outputOffset] += inputData[inputOffset];
+        unsigned int inputOffset = ReducedOutputOffset(inputNumDims, inputDims, tempIndex, 0, {});
+        unsigned int outputOffset = ReducedOutputOffset(inputNumDims, inputDims, tempIndex,
+                                                        numResolvedAxis, resolvedAxis);
+        input[inputOffset];
+        tempSum[outputOffset] += input.Get();
     }
 
     // Takes average by num of elements added to get mean.
     size_t numElementsInAxis = 1;
     for (unsigned int idx = 0; idx < numResolvedAxis; ++idx)
     {
-        size_t current = boost::numeric_cast<size_t>(inputDims[resolvedAxis[idx]]);
+        unsigned int current = inputDims[resolvedAxis[idx]];
         BOOST_ASSERT(boost::numeric_cast<float>(current) <
               (std::numeric_limits<float>::max() / boost::numeric_cast<float>(numElementsInAxis)));
         numElementsInAxis *= current;
     }
     if (numElementsInAxis > 0) {
-        for (size_t idx = 0; idx < numOutputs; ++idx)
+        for (unsigned int idx = 0; idx < numOutputs; ++idx)
         {
-            outputData[idx] = tempSum[idx] / boost::numeric_cast<float>(numElementsInAxis);
+            output[idx];
+            output.Set(tempSum[idx] / boost::numeric_cast<float>(numElementsInAxis));
         }
     }
 }
diff --git a/src/backends/reference/workloads/Mean.hpp b/src/backends/reference/workloads/Mean.hpp
index 38c2e39..dfb0302 100644
--- a/src/backends/reference/workloads/Mean.hpp
+++ b/src/backends/reference/workloads/Mean.hpp
@@ -7,6 +7,7 @@
 
 #include "armnn/DescriptorsFwd.hpp"
 #include "armnn/Tensor.hpp"
+#include "BaseIterator.hpp"
 
 #include <vector>
 
@@ -15,7 +16,7 @@
 void Mean(const TensorInfo& inputInfo,
           const TensorInfo& outputInfo,
           const std::vector<unsigned int>& axis,
-          const float* inputData,
-          float* outputData);
+          Decoder<float>& input,
+          Encoder<float>& output);
 } //namespace armnn
 
diff --git a/src/backends/reference/workloads/RefMeanFloat32Workload.cpp b/src/backends/reference/workloads/RefMeanFloat32Workload.cpp
deleted file mode 100644
index a23906b..0000000
--- a/src/backends/reference/workloads/RefMeanFloat32Workload.cpp
+++ /dev/null
@@ -1,35 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefMeanFloat32Workload.hpp"
-
-#include "Mean.hpp"
-#include "RefWorkloadUtils.hpp"
-
-#include "Profiling.hpp"
-#include "vector"
-
-namespace armnn
-{
-
-RefMeanFloat32Workload::RefMeanFloat32Workload(const MeanQueueDescriptor& descriptor, const WorkloadInfo& info)
-  :Float32Workload<MeanQueueDescriptor>(descriptor, info) {}
-
-
-void RefMeanFloat32Workload::Execute() const
-{
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefMeanFloat32Workload_Execute");
-
-    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
-    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
-    const float* inputData = GetInputTensorDataFloat(0, m_Data);
-    float* outputData = GetOutputTensorDataFloat(0, m_Data);
-
-    Mean(inputInfo, outputInfo, m_Data.m_Parameters.m_Axis, inputData, outputData);
-}
-
-} //namespace armnn
-
-
diff --git a/src/backends/reference/workloads/RefMeanFloat32Workload.hpp b/src/backends/reference/workloads/RefMeanFloat32Workload.hpp
deleted file mode 100644
index 153ebe1..0000000
--- a/src/backends/reference/workloads/RefMeanFloat32Workload.hpp
+++ /dev/null
@@ -1,22 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backendsCommon/Workload.hpp"
-#include "backendsCommon/WorkloadData.hpp"
-
-namespace armnn
-{
-
-
-class RefMeanFloat32Workload : public Float32Workload<MeanQueueDescriptor>
-{
-public:
-    explicit RefMeanFloat32Workload (const MeanQueueDescriptor& descriptor, const WorkloadInfo& info);
-    virtual void Execute() const override;
-};
-
-}//namespace armnn
diff --git a/src/backends/reference/workloads/RefMeanUint8Workload.cpp b/src/backends/reference/workloads/RefMeanUint8Workload.cpp
deleted file mode 100644
index 4ebffcf..0000000
--- a/src/backends/reference/workloads/RefMeanUint8Workload.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefMeanUint8Workload.hpp"
-
-#include "Mean.hpp"
-#include "RefWorkloadUtils.hpp"
-
-#include "Profiling.hpp"
-
-#include <vector>
-
-namespace armnn
-{
-
-RefMeanUint8Workload::RefMeanUint8Workload(const MeanQueueDescriptor& descriptor, const WorkloadInfo& info)
-  :Uint8Workload<MeanQueueDescriptor>(descriptor, info) {}
-
-
-void RefMeanUint8Workload::Execute() const
-{
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefMeanUint8Workload_Execute");
-
-    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
-    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
-
-    auto dequant = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo);
-
-    std::vector<float> results(outputInfo.GetNumElements());
-
-    Mean(inputInfo, outputInfo, m_Data.m_Parameters.m_Axis, dequant.data(), results.data());
-
-    Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo);
-}
-
-} //namespace armnn
-
diff --git a/src/backends/reference/workloads/RefMeanUint8Workload.hpp b/src/backends/reference/workloads/RefMeanUint8Workload.hpp
deleted file mode 100644
index f53b8a4..0000000
--- a/src/backends/reference/workloads/RefMeanUint8Workload.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backendsCommon/Workload.hpp"
-#include "backendsCommon/WorkloadData.hpp"
-
-namespace armnn
-{
-
-class RefMeanUint8Workload : public Uint8Workload<MeanQueueDescriptor>
-{
-public:
-    explicit RefMeanUint8Workload (const MeanQueueDescriptor& descriptor, const WorkloadInfo& info);
-    virtual void Execute() const override;
-};
-
-} //namespace armnn
diff --git a/src/backends/reference/workloads/RefMeanWorkload.cpp b/src/backends/reference/workloads/RefMeanWorkload.cpp
new file mode 100644
index 0000000..375ab39
--- /dev/null
+++ b/src/backends/reference/workloads/RefMeanWorkload.cpp
@@ -0,0 +1,34 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefMeanWorkload.hpp"
+
+#include "Mean.hpp"
+#include "RefWorkloadUtils.hpp"
+
+#include "Profiling.hpp"
+
+#include <vector>
+
+namespace armnn
+{
+
+RefMeanWorkload::RefMeanWorkload(const MeanQueueDescriptor& descriptor, const WorkloadInfo& info)
+  :BaseWorkload<MeanQueueDescriptor>(descriptor, info) {}
+
+void RefMeanWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefMeanWorkload_Execute");
+
+    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+    auto inputDecoder  = MakeDecoder<float>(inputInfo,  m_Data.m_Inputs[0]->Map());
+    auto outputEncoder = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
+
+    Mean(inputInfo, outputInfo, m_Data.m_Parameters.m_Axis, *inputDecoder, *outputEncoder);
+}
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefMeanWorkload.hpp b/src/backends/reference/workloads/RefMeanWorkload.hpp
new file mode 100644
index 0000000..eb4b407
--- /dev/null
+++ b/src/backends/reference/workloads/RefMeanWorkload.hpp
@@ -0,0 +1,24 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "backendsCommon/Workload.hpp"
+#include "backendsCommon/WorkloadData.hpp"
+
+#include "Decoders.hpp"
+#include "Encoders.hpp"
+
+namespace armnn
+{
+
+class RefMeanWorkload : public BaseWorkload<MeanQueueDescriptor>
+{
+public:
+    explicit RefMeanWorkload (const MeanQueueDescriptor& descriptor, const WorkloadInfo& info);
+    virtual void Execute() const override;
+};
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 7cfced4..b141291 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -42,8 +42,7 @@
 #include "RefLstmWorkload.hpp"
 #include "RefConvertFp16ToFp32Workload.hpp"
 #include "RefConvertFp32ToFp16Workload.hpp"
-#include "RefMeanUint8Workload.hpp"
-#include "RefMeanFloat32Workload.hpp"
+#include "RefMeanWorkload.hpp"
 #include "RefPadWorkload.hpp"
 #include "RefBatchToSpaceNdUint8Workload.hpp"
 #include "RefBatchToSpaceNdFloat32Workload.hpp"