IVGCVSW-3885 Add reference workload for DepthToSpace

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: Id937dc4425884ad1985dcdfaae8bf3fb64f0c766
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 83444ed..c2eb025 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -24,6 +24,8 @@
     Debug.cpp
     Debug.hpp
     Decoders.hpp
+    DepthToSpace.cpp
+    DepthToSpace.hpp
     DetectionPostProcess.cpp
     DetectionPostProcess.hpp
     ElementwiseFunction.cpp
@@ -71,6 +73,8 @@
     RefElementwiseWorkload.hpp
     RefDebugWorkload.cpp
     RefDebugWorkload.hpp
+    RefDepthToSpaceWorkload.cpp
+    RefDepthToSpaceWorkload.hpp
     RefDepthwiseConvolution2dWorkload.cpp
     RefDepthwiseConvolution2dWorkload.hpp
     RefDequantizeWorkload.cpp
diff --git a/src/backends/reference/workloads/DepthToSpace.cpp b/src/backends/reference/workloads/DepthToSpace.cpp
new file mode 100644
index 0000000..046bd47
--- /dev/null
+++ b/src/backends/reference/workloads/DepthToSpace.cpp
@@ -0,0 +1,79 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "DepthToSpace.hpp"
+
+#include <DataLayoutIndexed.hpp>
+#include <Permute.hpp>
+
+#include <boost/assert.hpp>
+
+using namespace armnnUtils;
+
+namespace armnn
+{
+
+void DepthToSpace(const TensorInfo& inputInfo,
+                  const DepthToSpaceDescriptor& descriptor,
+                  const void* inputData,
+                  void* outputData,
+                  unsigned int dataTypeSize)
+{
+    const unsigned int blockSize = descriptor.m_BlockSize;
+    BOOST_ASSERT(blockSize != 0u);
+
+    const TensorShape& inputShape = inputInfo.GetShape();
+    const unsigned int batches = inputShape[0];
+
+    armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
+    const unsigned int inDepth  = inputShape[dataLayoutIndexed.GetChannelsIndex()];
+    const unsigned int inHeight = inputShape[dataLayoutIndexed.GetHeightIndex()];
+    const unsigned int inWidth  = inputShape[dataLayoutIndexed.GetWidthIndex()];
+
+    const unsigned int outDepth = inDepth / (blockSize * blockSize);
+
+    // The 4D input data can be interpreted as 6D (implicitly reshaped) as follows:
+    //
+    // [batch, blockSize, blockSize, outDepth, inHeight, inWidth] for NCHW and
+    // [batch, inHeight, inWidth, blockSize, blockSize, outDepth] for NHWC.
+    //
+    // DepthToSpace can then be implemented as a permutation in 6D resulting in
+    // the following shapes:
+    //
+    // [batch, outDepth, inHeight, blockSize, inWidth, blockSize] for NCHW and
+    // [batch, inHeight, blockSize, inWidth, blockSize, outDepth] for NHWC.
+    //
+    // NOTE:
+    // Since 6D tensors are not currently supported, in practice we need to handle each
+    // batch separately and execute 5D permutations
+
+    TensorShape permDestShape;
+    PermutationVector permVector{};
+    if (descriptor.m_DataLayout == DataLayout::NCHW)
+    {
+        permDestShape = TensorShape({ outDepth, inHeight, blockSize, inWidth, blockSize });
+        permVector    = { 2, 4, 0, 1, 3 };
+    }
+    else
+    {
+        permDestShape = TensorShape({ inHeight, blockSize, inWidth, blockSize, outDepth });
+        permVector    = { 0, 2, 1, 3, 4 };
+    }
+
+    const unsigned int numElementsPerBatch = inputShape.GetNumElements() / batches;
+
+    for (unsigned int batchIndex = 0u; batchIndex < batches; ++batchIndex)
+    {
+        const uintptr_t batchDataOffset = batchIndex * (numElementsPerBatch * dataTypeSize);
+
+        armnnUtils::Permute(permDestShape,
+                            permVector,
+                            static_cast<const void*>(reinterpret_cast<const uint8_t*>(inputData) + batchDataOffset),
+                            static_cast<void*>(reinterpret_cast<uint8_t*>(outputData) + batchDataOffset),
+                            dataTypeSize);
+    }
+}
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/DepthToSpace.hpp b/src/backends/reference/workloads/DepthToSpace.hpp
new file mode 100644
index 0000000..a1805c0
--- /dev/null
+++ b/src/backends/reference/workloads/DepthToSpace.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Descriptors.hpp>
+#include <armnn/Tensor.hpp>
+
+namespace armnn
+{
+
+void DepthToSpace(const TensorInfo& inputInfo,
+                  const DepthToSpaceDescriptor& descriptor,
+                  const void* inputData,
+                  void* outputData,
+                  unsigned int dataTypeSize);
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefDepthToSpaceWorkload.cpp b/src/backends/reference/workloads/RefDepthToSpaceWorkload.cpp
new file mode 100644
index 0000000..93c1120
--- /dev/null
+++ b/src/backends/reference/workloads/RefDepthToSpaceWorkload.cpp
@@ -0,0 +1,27 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefDepthToSpaceWorkload.hpp"
+
+#include "DepthToSpace.hpp"
+#include "RefWorkloadUtils.hpp"
+
+namespace armnn
+{
+
+void RefDepthToSpaceWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDepthToSpaceWorkload_Execute");
+
+    const TensorInfo inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+
+    // Map the tensors once and pass the raw buffers through to the reference kernel.
+    const void* inputData  = m_Data.m_Inputs[0]->Map();
+    void*       outputData = m_Data.m_Outputs[0]->Map();
+
+    DepthToSpace(inputInfo, m_Data.m_Parameters, inputData, outputData,
+                 GetDataTypeSize(inputInfo.GetDataType()));
+}
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefDepthToSpaceWorkload.hpp b/src/backends/reference/workloads/RefDepthToSpaceWorkload.hpp
new file mode 100644
index 0000000..327cd9d
--- /dev/null
+++ b/src/backends/reference/workloads/RefDepthToSpaceWorkload.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "backendsCommon/Workload.hpp"
+
+namespace armnn
+{
+
+class RefDepthToSpaceWorkload : public BaseWorkload<DepthToSpaceQueueDescriptor>
+{
+public:
+    using BaseWorkload<DepthToSpaceQueueDescriptor>::BaseWorkload;
+    void Execute() const override;
+};
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index b4721b1..94592cb 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -25,9 +25,10 @@
 #include "RefConcatWorkload.hpp"
 #include "RefConvertFp16ToFp32Workload.hpp"
 #include "RefConvertFp32ToFp16Workload.hpp"
-#include "RefDepthwiseConvolution2dWorkload.hpp"
-#include "RefDetectionPostProcessWorkload.hpp"
-#include "RefDebugWorkload.hpp"
-#include "RefDequantizeWorkload.hpp"
+#include "RefDebugWorkload.hpp"
+#include "RefDepthToSpaceWorkload.hpp"
+#include "RefDepthwiseConvolution2dWorkload.hpp"
+#include "RefDequantizeWorkload.hpp"
+#include "RefDetectionPostProcessWorkload.hpp"
 #include "RefElementwiseWorkload.hpp"
 #include "RefFullyConnectedWorkload.hpp"