Add fp16 support for dequantize

* Changed RefDequantizeWorkload to use Encoder/Decoder
* Added related unit tests for Cl, Neon and Ref

Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: Ic2fd4103090dd2127c6859b49305736f7b2dfb05
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 5972158..716e8d9 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -587,8 +587,9 @@
     supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
                                   "Reference dequantize: input type not supported.");
 
-    std::array<DataType,1> supportedOutputTypes = {
-        DataType::Float32
+    std::array<DataType,2> supportedOutputTypes = {
+        DataType::Float32,
+        DataType::Float16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index 7e97acd..5f9af59 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -31,6 +31,7 @@
         workloads/Debug.cpp \
         workloads/DepthToSpace.cpp \
         workloads/DetectionPostProcess.cpp \
+        workloads/Dequantize.cpp \
         workloads/ElementwiseFunction.cpp \
         workloads/FullyConnected.cpp \
         workloads/Gather.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 1b284c3..7f28038 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1378,6 +1378,8 @@
 ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8, DequantizeSimpleUint8Test)
 ARMNN_AUTO_TEST_CASE(DequantizeOffsetUint8, DequantizeOffsetUint8Test)
 ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16, DequantizeSimpleInt16Test)
+ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8ToFp16, DequantizeSimpleUint8ToFp16Test)
+ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16ToFp16, DequantizeSimpleInt16ToFp16Test)
 
 // Quantize
 ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 7844518..29abfed 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -28,6 +28,8 @@
     DepthToSpace.hpp
     DetectionPostProcess.cpp
     DetectionPostProcess.hpp
+    Dequantize.cpp
+    Dequantize.hpp
     ElementwiseFunction.cpp
     ElementwiseFunction.hpp
     Encoders.hpp
diff --git a/src/backends/reference/workloads/Dequantize.cpp b/src/backends/reference/workloads/Dequantize.cpp
new file mode 100644
index 0000000..fafc03e
--- /dev/null
+++ b/src/backends/reference/workloads/Dequantize.cpp
@@ -0,0 +1,29 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Dequantize.hpp"
+
+namespace armnn
+{
+
+void Dequantize(Decoder<float>& inputDecoder,
+                Encoder<float>& outputEncoder,
+                const TensorInfo& inputInfo,
+                const TensorInfo& outputInfo)
+{
+    BOOST_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
+    for (unsigned int i = 0; i < inputInfo.GetNumElements(); i++)
+    {
+        // inputDecoder.Get() dequantizes the current element from the type
+        // described by inputInfo to fp32 (provided MakeDecoder supports that dequantization);
+        // outputEncoder.Set() then converts the fp32 value to the type
+        // described by outputInfo (provided MakeEncoder supports that conversion).
+        outputEncoder.Set(inputDecoder.Get());
+        ++outputEncoder;
+        ++inputDecoder;
+    }
+}
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/Dequantize.hpp b/src/backends/reference/workloads/Dequantize.hpp
new file mode 100644
index 0000000..c01b454
--- /dev/null
+++ b/src/backends/reference/workloads/Dequantize.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Tensor.hpp>
+#include "Encoders.hpp"
+#include "Decoders.hpp"
+
+namespace armnn
+{
+
+void Dequantize(Decoder<float>& inputDecoder,
+                Encoder<float>& outputEncoder,
+                const TensorInfo& inputInfo,
+                const TensorInfo& outputInfo);
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefDequantizeWorkload.cpp b/src/backends/reference/workloads/RefDequantizeWorkload.cpp
index d861c50..e6f5c6b 100644
--- a/src/backends/reference/workloads/RefDequantizeWorkload.cpp
+++ b/src/backends/reference/workloads/RefDequantizeWorkload.cpp
@@ -5,6 +5,9 @@
 
 #include "RefDequantizeWorkload.hpp"
 #include "RefWorkloadUtils.hpp"
+#include "Encoders.hpp"
+#include "Decoders.hpp"
+#include "Dequantize.hpp"
 
 namespace armnn
 {
@@ -14,21 +17,12 @@
     ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDequantizeWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
-    const DataType& inputDataType = inputInfo.GetDataType();
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
 
-    float* outputData = GetOutputTensorData<float>(0, m_Data);
+    auto inputDecoder  = MakeDecoder<float>(inputInfo,  m_Data.m_Inputs[0]->Map());
+    auto outputEncoder = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
 
-    switch (inputDataType)
-    {
-        case DataType::QuantisedAsymm8:
-            Dequantize<uint8_t>(GetInputTensorData<uint8_t>(0, m_Data), outputData, inputInfo);
-            break;
-        case DataType::QuantisedSymm16:
-            Dequantize<int16_t>(GetInputTensorData<int16_t>(0, m_Data), outputData, inputInfo);
-            break;
-        default:
-            throw InvalidArgumentException("RefDequantizeWorkload: Unsupported input data type");
-    }
+    Dequantize(*inputDecoder, *outputEncoder, inputInfo, outputInfo);
 }
 
 } // namespace armnn