IVGCVSW-2875 Reference implementation and unit tests for Dequantize
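
Add RefDequantizeWorkload to the reference backend. For each element of
the input tensor, the new Dequantize helper in RefWorkloadUtils.hpp
computes real = scale * (quantized - offset) and writes a Float32
output. QuantisedAsymm8 and QuantisedSymm16 inputs are supported; any
other input type throws InvalidArgumentException.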

Change-Id: Ie4ade0519cb0bbe35dc36be6c9cd749b9171c74b
Signed-off-by: Nattapat Chaimanowong <nattapat.chaimanowong@arm.com>
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 89aed91..c4fc202 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -63,6 +63,8 @@
     RefDepthwiseConvolution2dFloat32Workload.hpp
     RefDepthwiseConvolution2dUint8Workload.cpp
     RefDepthwiseConvolution2dUint8Workload.hpp
+    RefDequantizeWorkload.cpp
+    RefDequantizeWorkload.hpp
     RefDetectionPostProcessUint8Workload.cpp
     RefDetectionPostProcessUint8Workload.hpp
     RefDetectionPostProcessFloat32Workload.cpp
diff --git a/src/backends/reference/workloads/RefDequantizeWorkload.cpp b/src/backends/reference/workloads/RefDequantizeWorkload.cpp
new file mode 100644
index 0000000..d861c50
--- /dev/null
+++ b/src/backends/reference/workloads/RefDequantizeWorkload.cpp
@@ -0,0 +1,35 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefDequantizeWorkload.hpp"
+#include "RefWorkloadUtils.hpp"
+
+namespace armnn
+{
+
+void RefDequantizeWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDequantizeWorkload_Execute");
+
+    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+    const DataType inputDataType = inputInfo.GetDataType();
+
+    float* outputData = GetOutputTensorData<float>(0, m_Data);
+
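+    // Dispatch on the quantized input type; the Dequantize helper always writes Float32.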
+    switch (inputDataType)
+    {
+        case DataType::QuantisedAsymm8:
+            Dequantize<uint8_t>(GetInputTensorData<uint8_t>(0, m_Data), outputData, inputInfo);
+            break;
+        case DataType::QuantisedSymm16:
+            Dequantize<int16_t>(GetInputTensorData<int16_t>(0, m_Data), outputData, inputInfo);
+            break;
+        default:
+            throw InvalidArgumentException("RefDequantizeWorkload: Unsupported input data type");
+    }
+}
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefDequantizeWorkload.hpp b/src/backends/reference/workloads/RefDequantizeWorkload.hpp
new file mode 100644
index 0000000..8d019e3
--- /dev/null
+++ b/src/backends/reference/workloads/RefDequantizeWorkload.hpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+namespace armnn
+{
+
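+/// Reference workload that converts a quantized input tensor to a Float32 output tensor.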
+class RefDequantizeWorkload : public BaseWorkload<DequantizeQueueDescriptor>
+{
+public:
+    using BaseWorkload<DequantizeQueueDescriptor>::m_Data;
+    using BaseWorkload<DequantizeQueueDescriptor>::BaseWorkload;
+
+    void Execute() const override;
+};
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefWorkloadUtils.hpp b/src/backends/reference/workloads/RefWorkloadUtils.hpp
index feb4329..ce79616 100644
--- a/src/backends/reference/workloads/RefWorkloadUtils.hpp
+++ b/src/backends/reference/workloads/RefWorkloadUtils.hpp
@@ -127,6 +127,17 @@
     return ret;
 }
 
+// Dequantizes each element of the input tensor to float using the tensor's
+// quantization parameters: real = scale * (quantized - offset).
+template<typename T>
+inline void Dequantize(const T* inputData, float* outputData, const TensorInfo& info)
+{
+    for (unsigned int i = 0; i < info.GetNumElements(); i++)
+    {
+        outputData[i] = Dequantize<T>(inputData[i], info.GetQuantizationScale(), info.GetQuantizationOffset());
+    }
+}
+
 inline void Quantize(uint8_t* quant, const float* dequant, const TensorInfo& info)
 {
     for (size_t i = 0; i < info.GetNumElements(); i++)
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 2156388..7d2e813 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -63,3 +63,4 @@
 #include "RefDebugWorkload.hpp"
 #include "RefRsqrtFloat32Workload.hpp"
 #include "RefComparisonWorkload.hpp"
+#include "RefDequantizeWorkload.hpp"