IVGCVSW-3248 Refactor reference BatchToSpace workload
* Add Decoders and Encoders to workload to make it data-type agnostic
* Merge Float32 and Uint8 workloads into a single workload
Change-Id: I8adfa1898a63f13889eaaf55a31c26fd1e2d7ee8
Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 1f241f0..5b03555 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -391,7 +391,11 @@
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkload<RefBatchToSpaceNdFloat32Workload, RefBatchToSpaceNdUint8Workload>(descriptor, info);
+ if (IsFloat16(info))
+ {
+ return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+ }
+ return std::make_unique<RefBatchToSpaceNdWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index 6b7e895..849d87c 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -28,8 +28,7 @@
workloads/PreluImpl.cpp \
workloads/RefActivationWorkload.cpp \
workloads/RefBatchNormalizationWorkload.cpp \
- workloads/RefBatchToSpaceNdFloat32Workload.cpp \
- workloads/RefBatchToSpaceNdUint8Workload.cpp \
+ workloads/RefBatchToSpaceNdWorkload.cpp \
workloads/RefConcatWorkload.cpp \
workloads/RefConstantWorkload.cpp \
workloads/RefConvertFp16ToFp32Workload.cpp \
diff --git a/src/backends/reference/workloads/BatchToSpaceNd.cpp b/src/backends/reference/workloads/BatchToSpaceNd.cpp
index 5f64213..7efdb9b 100644
--- a/src/backends/reference/workloads/BatchToSpaceNd.cpp
+++ b/src/backends/reference/workloads/BatchToSpaceNd.cpp
@@ -37,8 +37,8 @@
const TensorInfo& outputTensorInfo,
const std::vector<unsigned int>& blockShape,
const std::vector<std::pair<unsigned int, unsigned int>>& cropsData,
- const float* inputData,
- float* outputData)
+ Decoder<float>& inputDecoder,
+ Encoder<float>& outputEncoder)
{
TensorShape inputShape = inputTensorInfo.GetShape();
@@ -90,7 +90,10 @@
{
unsigned int outOffset = Offset(outputShape, outBatch, outH, outW, c, dataLayout);
unsigned int inOffset = Offset(inputShape, inBatch, inH, inW, c, dataLayout);
- outputData[outOffset] = inputData[inOffset];
+
+ outputEncoder[outOffset];
+ inputDecoder[inOffset];
+ outputEncoder.Set(inputDecoder.Get());
}
}
}
diff --git a/src/backends/reference/workloads/BatchToSpaceNd.hpp b/src/backends/reference/workloads/BatchToSpaceNd.hpp
index f08df93..b757d37 100644
--- a/src/backends/reference/workloads/BatchToSpaceNd.hpp
+++ b/src/backends/reference/workloads/BatchToSpaceNd.hpp
@@ -9,6 +9,9 @@
#include <backendsCommon/Workload.hpp>
#include <backendsCommon/WorkloadData.hpp>
+#include "BaseIterator.hpp"
+#include "Decoders.hpp"
+#include "Encoders.hpp"
#include <DataLayoutIndexed.hpp>
@@ -20,6 +23,6 @@
const TensorInfo& outputTensorInfo,
const std::vector<unsigned int>& blockShape,
const std::vector<std::pair<unsigned int, unsigned int>>& cropsData,
- const float* inputData,
- float* outputData);
+ Decoder<float>& inputDecoder,
+ Encoder<float>& outputEncoder);
} // namespace armnn
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index ca35e27..daa0043 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -42,10 +42,8 @@
RefActivationWorkload.hpp
RefBatchNormalizationWorkload.cpp
RefBatchNormalizationWorkload.hpp
- RefBatchToSpaceNdFloat32Workload.cpp
- RefBatchToSpaceNdFloat32Workload.hpp
- RefBatchToSpaceNdUint8Workload.cpp
- RefBatchToSpaceNdUint8Workload.hpp
+ RefBatchToSpaceNdWorkload.cpp
+ RefBatchToSpaceNdWorkload.hpp
RefConstantWorkload.cpp
RefConstantWorkload.hpp
RefConvertFp16ToFp32Workload.cpp
diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.cpp b/src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.cpp
deleted file mode 100644
index bf246c2..0000000
--- a/src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.cpp
+++ /dev/null
@@ -1,28 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "BatchToSpaceNd.hpp"
-#include "Profiling.hpp"
-#include "RefBatchToSpaceNdFloat32Workload.hpp"
-#include "RefWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-void RefBatchToSpaceNdFloat32Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefBatchToSpaceNdFloat32Workload_Execute");
-
- const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
- const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
- const float* inputData = GetInputTensorDataFloat(0, m_Data);
- float* outputData = GetOutputTensorDataFloat(0, m_Data);
-
- BatchToSpaceNd(m_Data.m_Parameters.m_DataLayout, inputInfo, outputInfo, m_Data.m_Parameters.m_BlockShape,
- m_Data.m_Parameters.m_Crops, inputData, outputData);
-}
-
-
-} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.cpp b/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.cpp
deleted file mode 100644
index e4e108e..0000000
--- a/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.cpp
+++ /dev/null
@@ -1,29 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "BatchToSpaceNd.hpp"
-#include "Profiling.hpp"
-#include "RefBatchToSpaceNdUint8Workload.hpp"
-#include "RefWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-void RefBatchToSpaceNdUint8Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefBatchToSpaceNdUint8Workload_Execute");
-
- const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
- const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
- auto dequantizedInputData = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo);
-
- std::vector<float> results(outputInfo.GetNumElements());
- BatchToSpaceNd(m_Data.m_Parameters.m_DataLayout, inputInfo, outputInfo, m_Data.m_Parameters.m_BlockShape,
- m_Data.m_Parameters.m_Crops, dequantizedInputData.data(), results.data());
-
- Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo);
-}
-
-} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.hpp b/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.hpp
deleted file mode 100644
index 1f221c2..0000000
--- a/src/backends/reference/workloads/RefBatchToSpaceNdUint8Workload.hpp
+++ /dev/null
@@ -1,23 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <backendsCommon/Workload.hpp>
-#include <backendsCommon/WorkloadData.hpp>
-
-namespace armnn
-{
-
-class RefBatchToSpaceNdUint8Workload : public Uint8Workload<BatchToSpaceNdQueueDescriptor>
-{
-
-public:
- using Uint8Workload<BatchToSpaceNdQueueDescriptor>::Uint8Workload;
-
- virtual void Execute() const override;
-};
-
-} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp b/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp
new file mode 100644
index 0000000..c293066
--- /dev/null
+++ b/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp
@@ -0,0 +1,29 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "BatchToSpaceNd.hpp"
+#include "Profiling.hpp"
+#include "RefBatchToSpaceNdWorkload.hpp"
+#include "RefWorkloadUtils.hpp"
+
+namespace armnn
+{
+
+void RefBatchToSpaceNdWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefBatchToSpaceNdWorkload_Execute");
+
+ const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+ const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+ std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
+ std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
+
+ BatchToSpaceNd(m_Data.m_Parameters.m_DataLayout, inputInfo, outputInfo, m_Data.m_Parameters.m_BlockShape,
+ m_Data.m_Parameters.m_Crops, *inputDecoder, *outputEncoder);
+}
+
+
+} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.hpp b/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.hpp
similarity index 62%
rename from src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.hpp
rename to src/backends/reference/workloads/RefBatchToSpaceNdWorkload.hpp
index 4977772..60577ba 100644
--- a/src/backends/reference/workloads/RefBatchToSpaceNdFloat32Workload.hpp
+++ b/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.hpp
@@ -10,11 +10,11 @@
namespace armnn {
-class RefBatchToSpaceNdFloat32Workload : public Float32Workload<BatchToSpaceNdQueueDescriptor>
+class RefBatchToSpaceNdWorkload : public BaseWorkload<BatchToSpaceNdQueueDescriptor>
{
public:
- using Float32Workload<BatchToSpaceNdQueueDescriptor>::Float32Workload;
+ using BaseWorkload<BatchToSpaceNdQueueDescriptor>::BaseWorkload;
virtual void Execute() const override;
};
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 056127a..9058281 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -15,8 +15,7 @@
#include "Pooling2d.hpp"
#include "RefActivationWorkload.hpp"
#include "RefBatchNormalizationWorkload.hpp"
-#include "RefBatchToSpaceNdUint8Workload.hpp"
-#include "RefBatchToSpaceNdFloat32Workload.hpp"
+#include "RefBatchToSpaceNdWorkload.hpp"
#include "RefConvolution2dWorkload.hpp"
#include "RefConstantWorkload.hpp"
#include "RefConcatWorkload.hpp"