IVGCVSW-3231 Add reference workload support for SpaceToDepth

* Added reference workload for SpaceToDepth
* Added unit tests for float32 & uint8
* Minor refactoring: sorted the includes in RefWorkloads.hpp into alphabetical order

Change-Id: I2e01f8101650e2aae102a8a32bc0064f067141ab
Signed-off-by: Keith Davis <keith.davis@arm.com>
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index db0daa0..ca35e27 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -100,6 +100,8 @@
     RefSoftmaxWorkload.hpp
     RefSpaceToBatchNdWorkload.cpp
     RefSpaceToBatchNdWorkload.hpp
+    RefSpaceToDepthWorkload.cpp
+    RefSpaceToDepthWorkload.hpp
     RefSplitterWorkload.cpp
     RefSplitterWorkload.hpp
     RefStridedSliceWorkload.cpp
@@ -114,6 +116,8 @@
     Softmax.hpp
     SpaceToBatchNd.hpp
     SpaceToBatchNd.cpp
+    SpaceToDepth.hpp
+    SpaceToDepth.cpp
     Splitter.hpp
     Splitter.cpp
     StridedSlice.hpp
diff --git a/src/backends/reference/workloads/RefSpaceToDepthWorkload.cpp b/src/backends/reference/workloads/RefSpaceToDepthWorkload.cpp
new file mode 100644
index 0000000..1b12272
--- /dev/null
+++ b/src/backends/reference/workloads/RefSpaceToDepthWorkload.cpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefSpaceToDepthWorkload.hpp"
+#include "SpaceToDepth.hpp"
+
+#include "RefWorkloadUtils.hpp"
+#include <ResolveType.hpp>
+
+namespace armnn
+{
+
+void RefSpaceToDepthWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSpaceToDepthWorkload_Execute");
+
+    // Decode the input and encode the output through type-erased float
+    // iterators so one SpaceToDepth implementation serves all data types.
+    const TensorInfo& inputInfo  = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+    std::unique_ptr<Decoder<float>> inputDecoder  = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
+    std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
+
+    SpaceToDepth(inputInfo, outputInfo, m_Data.m_Parameters, *inputDecoder, *outputEncoder);
+}
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefSpaceToDepthWorkload.hpp b/src/backends/reference/workloads/RefSpaceToDepthWorkload.hpp
new file mode 100644
index 0000000..82d8528
--- /dev/null
+++ b/src/backends/reference/workloads/RefSpaceToDepthWorkload.hpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "backendsCommon/Workload.hpp"
+
+#include <armnn/TypesUtils.hpp>
+
+namespace armnn
+{
+
+// Reference-backend workload for the SpaceToDepth layer: executes the
+// operation on CpuRef via the SpaceToDepth() helper (see Execute()).
+class RefSpaceToDepthWorkload : public BaseWorkload<SpaceToDepthQueueDescriptor>
+{
+public:
+    using BaseWorkload<SpaceToDepthQueueDescriptor>::BaseWorkload;
+    void Execute() const override;
+};
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 41b16fa..056127a 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -5,50 +5,51 @@
 
 #pragma once
 
-#include "ElementwiseFunction.hpp"
-#include "RefElementwiseWorkload.hpp"
-#include "ConvImpl.hpp"
-#include "RefConstantWorkload.hpp"
-#include "RefConvolution2dWorkload.hpp"
-#include "RefSplitterWorkload.hpp"
-#include "RefResizeBilinearWorkload.hpp"
-#include "RefL2NormalizationWorkload.hpp"
-#include "RefActivationWorkload.hpp"
-#include "RefPooling2dWorkload.hpp"
-#include "RefWorkloadUtils.hpp"
-#include "RefConcatWorkload.hpp"
-#include "RefFullyConnectedWorkload.hpp"
-#include "RefGatherWorkload.hpp"
-#include "Softmax.hpp"
-#include "TensorBufferArrayView.hpp"
-#include "RefBatchNormalizationWorkload.hpp"
-#include "Splitter.hpp"
-#include "RefDepthwiseConvolution2dWorkload.hpp"
-#include "FullyConnected.hpp"
-#include "Gather.hpp"
-#include "RefFloorWorkload.hpp"
-#include "RefSoftmaxWorkload.hpp"
-#include "ResizeBilinear.hpp"
-#include "RefNormalizationWorkload.hpp"
-#include "RefDetectionPostProcessWorkload.hpp"
-#include "BatchNormImpl.hpp"
-#include "Activation.hpp"
-#include "Concatenate.hpp"
-#include "RefSpaceToBatchNdWorkload.hpp"
-#include "RefStridedSliceWorkload.hpp"
-#include "Pooling2d.hpp"
-#include "RefFakeQuantizationFloat32Workload.hpp"
-#include "RefPermuteWorkload.hpp"
-#include "RefLstmWorkload.hpp"
-#include "RefConvertFp16ToFp32Workload.hpp"
-#include "RefConvertFp32ToFp16Workload.hpp"
-#include "RefMeanWorkload.hpp"
-#include "RefPadWorkload.hpp"
-#include "RefBatchToSpaceNdUint8Workload.hpp"
-#include "RefBatchToSpaceNdFloat32Workload.hpp"
-#include "RefDebugWorkload.hpp"
-#include "RefRsqrtWorkload.hpp"
-#include "RefDequantizeWorkload.hpp"
-#include "RefQuantizeWorkload.hpp"
-#include "RefReshapeWorkload.hpp"
-#include "RefPreluWorkload.hpp"
+#include "Activation.hpp"
+#include "BatchNormImpl.hpp"
+#include "Concatenate.hpp"
+#include "ConvImpl.hpp"
+#include "ElementwiseFunction.hpp"
+#include "FullyConnected.hpp"
+#include "Gather.hpp"
+#include "Pooling2d.hpp"
+#include "RefActivationWorkload.hpp"
+#include "RefBatchNormalizationWorkload.hpp"
+#include "RefBatchToSpaceNdFloat32Workload.hpp"
+#include "RefBatchToSpaceNdUint8Workload.hpp"
+#include "RefConcatWorkload.hpp"
+#include "RefConstantWorkload.hpp"
+#include "RefConvertFp16ToFp32Workload.hpp"
+#include "RefConvertFp32ToFp16Workload.hpp"
+#include "RefConvolution2dWorkload.hpp"
+#include "RefDebugWorkload.hpp"
+#include "RefDepthwiseConvolution2dWorkload.hpp"
+#include "RefDequantizeWorkload.hpp"
+#include "RefDetectionPostProcessWorkload.hpp"
+#include "RefElementwiseWorkload.hpp"
+#include "RefFakeQuantizationFloat32Workload.hpp"
+#include "RefFloorWorkload.hpp"
+#include "RefFullyConnectedWorkload.hpp"
+#include "RefGatherWorkload.hpp"
+#include "RefL2NormalizationWorkload.hpp"
+#include "RefLstmWorkload.hpp"
+#include "RefMeanWorkload.hpp"
+#include "RefNormalizationWorkload.hpp"
+#include "RefPadWorkload.hpp"
+#include "RefPermuteWorkload.hpp"
+#include "RefPooling2dWorkload.hpp"
+#include "RefPreluWorkload.hpp"
+#include "RefQuantizeWorkload.hpp"
+#include "RefReshapeWorkload.hpp"
+#include "RefResizeBilinearWorkload.hpp"
+#include "RefRsqrtWorkload.hpp"
+#include "RefSoftmaxWorkload.hpp"
+#include "RefSpaceToBatchNdWorkload.hpp"
+#include "RefSpaceToDepthWorkload.hpp"
+#include "RefSplitterWorkload.hpp"
+#include "RefStridedSliceWorkload.hpp"
+#include "RefWorkloadUtils.hpp"
+#include "ResizeBilinear.hpp"
+#include "Softmax.hpp"
+#include "Splitter.hpp"
+#include "TensorBufferArrayView.hpp"
diff --git a/src/backends/reference/workloads/SpaceToDepth.cpp b/src/backends/reference/workloads/SpaceToDepth.cpp
new file mode 100644
index 0000000..4a4f418
--- /dev/null
+++ b/src/backends/reference/workloads/SpaceToDepth.cpp
@@ -0,0 +1,107 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SpaceToDepth.hpp"
+
+#include <DataLayoutIndexed.hpp>
+
+using namespace armnnUtils;
+
+namespace
+{
+
+// Computes the flat buffer index of element (b, c, h, w) for either the
+// NHWC or the NCHW data layout described by dataLayout.
+unsigned int GetOffset(const armnn::TensorShape& shape,
+                       unsigned int c,
+                       unsigned int h,
+                       unsigned int w,
+                       unsigned int b,
+                       const DataLayoutIndexed& dataLayout)
+{
+    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        return ((b * shape[dataLayout.GetHeightIndex()] + h) * shape[dataLayout.GetWidthIndex()] + w) *
+            shape[dataLayout.GetChannelsIndex()] + c;
+    }
+    return ((b * shape[dataLayout.GetChannelsIndex()] + c) * shape[dataLayout.GetHeightIndex()] + h) *
+        shape[dataLayout.GetWidthIndex()] + w;
+}
+
+} // anonymous namespace
+
+namespace armnn
+{
+
+void SpaceToDepth(const TensorInfo& inputInfo,
+                  const TensorInfo& outputInfo,
+                  const SpaceToDepthDescriptor& params,
+                  Decoder<float>& inputData,
+                  Encoder<float>& outputData)
+{
+    DataLayoutIndexed dataLayout = params.m_DataLayout;
+
+    const TensorShape& inputShape = inputInfo.GetShape();
+    const TensorShape& outputShape = outputInfo.GetShape();
+
+    const unsigned int inputBatchSize = inputShape[0];
+    const unsigned int inputChannels = inputShape[dataLayout.GetChannelsIndex()];
+
+    const unsigned int outputHeight = outputShape[dataLayout.GetHeightIndex()];
+    const unsigned int outputWidth = outputShape[dataLayout.GetWidthIndex()];
+    const unsigned int outputChannels = outputShape[dataLayout.GetChannelsIndex()];
+
+    const unsigned int blockSize = params.m_BlockSize;
+
+    if (blockSize == 0)
+    {
+        throw InvalidArgumentException(
+            "Input shape must be divisible by block size in all spatial dimensions: Block size is"
+            " equal to zero");
+    }
+
+    // Each output channel corresponds to one input channel plus a
+    // (shiftH, shiftW) position inside the blockSize x blockSize spatial block.
+    for (unsigned int outChannelIndex = 0; outChannelIndex < outputChannels; outChannelIndex++)
+    {
+        unsigned int inChannelIndex = outChannelIndex % inputChannels;
+
+        unsigned int shiftW = (outChannelIndex / inputChannels) % blockSize;
+        unsigned int shiftH = (outChannelIndex / inputChannels) / blockSize;
+
+        for (unsigned int outH = 0; outH < outputHeight; outH++)
+        {
+            for (unsigned int outW = 0; outW < outputWidth; outW++)
+            {
+                for (unsigned int inBatchIndex = 0; inBatchIndex < inputBatchSize; inBatchIndex++)
+                {
+                    unsigned int inOffset = GetOffset(inputShape,
+                        inChannelIndex,
+                        (outH * blockSize + shiftH),
+                        (outW * blockSize + shiftW),
+                        inBatchIndex,
+                        dataLayout);
+
+                    unsigned int outOffset = GetOffset(outputShape,
+                        outChannelIndex,
+                        outH,
+                        outW,
+                        inBatchIndex,
+                        dataLayout);
+
+                    // Seek the iterators to the source/destination elements,
+                    // copy one value, then rewind for the next iteration.
+                    outputData += outOffset;
+                    inputData += inOffset;
+                    outputData.Set(inputData.Get());
+                    inputData -= inOffset;
+                    outputData -= outOffset;
+                }
+            }
+        }
+    }
+}
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/SpaceToDepth.hpp b/src/backends/reference/workloads/SpaceToDepth.hpp
new file mode 100644
index 0000000..f855884
--- /dev/null
+++ b/src/backends/reference/workloads/SpaceToDepth.hpp
@@ -0,0 +1,26 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "BaseIterator.hpp"
+#include "Decoders.hpp"
+#include "Encoders.hpp"
+
+#include <armnn/Descriptors.hpp>
+#include <armnn/Tensor.hpp>
+
+namespace armnn
+{
+
+// Reference implementation of SpaceToDepth: rearranges blockSize x blockSize
+// spatial blocks of the input into the output's channel dimension.
+void SpaceToDepth(const TensorInfo& inputInfo,
+                  const TensorInfo& outputInfo,
+                  const SpaceToDepthDescriptor& params,
+                  Decoder<float>& inputData,
+                  Encoder<float>& outputData);
+
+} //namespace armnn