Add FP16 support to DebugWorkload

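The reference Debug workload previously handled only Float32, QAsymm8 and
QSymm16 tensors. This change adds Float16 to the data types accepted by
RefLayerSupport::IsDebugSupported, makes RefWorkloadFactory::CreateDebug
return a RefDebugFloat16Workload when the workload info is Float16, and adds
the matching Debug<Half> and RefDebugWorkload<DataType::Float16>
instantiations.

A minimal sketch of exercising the new Debug<Half> instantiation directly;
the tensor values, guid, layer name and slot index below are illustrative
only, not taken from this patch:

    #include "Debug.hpp"

    #include <Half.hpp>
    #include <armnn/Tensor.hpp>

    #include <vector>

    int main()
    {
        using namespace armnn;

        // 1x1x2x2 Float16 tensor with arbitrary example values
        TensorInfo info(TensorShape({ 1, 1, 2, 2 }), DataType::Float16);
        std::vector<Half> data = { Half(1.0f), Half(2.0f), Half(3.0f), Half(4.0f) };

        // Prints the tensor contents through the new Float16 path
        Debug<Half>(info, data.data(), /*guid=*/0, "ExampleLayer", /*slotIndex=*/0);

        return 0;
    }
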
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: Ia879f2d84a1b977474ee0dafa976f2aab32bd3ae
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index ef0cc8c..5a84d8a 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -495,8 +495,9 @@
 {
     bool supported = true;
 
-    std::array<DataType,3> supportedTypes =
+    std::array<DataType, 4> supportedTypes =
     {
+        DataType::Float16,
         DataType::Float32,
         DataType::QuantisedAsymm8,
         DataType::QuantisedSymm16
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index c2cb51a..7fd9343 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -172,10 +172,15 @@
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
 {
+    if (IsFloat16(info))
+    {
+        return std::make_unique<RefDebugFloat16Workload>(descriptor, info);
+    }
     if (IsQSymm16(info))
     {
         return std::make_unique<RefDebugQSymm16Workload>(descriptor, info);
     }
+
     return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymm8Workload>(descriptor, info);
 }
 
diff --git a/src/backends/reference/workloads/Debug.cpp b/src/backends/reference/workloads/Debug.cpp
index 09a0dfc..b7d0911 100644
--- a/src/backends/reference/workloads/Debug.cpp
+++ b/src/backends/reference/workloads/Debug.cpp
@@ -2,8 +2,11 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
 #include "Debug.hpp"
 
+#include <Half.hpp>
+
 #include <boost/numeric/conversion/cast.hpp>
 
 #include <algorithm>
@@ -85,6 +88,12 @@
     std::cout << " }" << std::endl;
 }
 
+template void Debug<Half>(const TensorInfo& inputInfo,
+                          const Half* inputData,
+                          LayerGuid guid,
+                          const std::string& layerName,
+                          unsigned int slotIndex);
+
 template void Debug<float>(const TensorInfo& inputInfo,
                            const float* inputData,
                            LayerGuid guid,
@@ -102,4 +111,5 @@
                              LayerGuid guid,
                              const std::string& layerName,
                              unsigned int slotIndex);
+
 } // namespace armnn
diff --git a/src/backends/reference/workloads/RefDebugWorkload.cpp b/src/backends/reference/workloads/RefDebugWorkload.cpp
index 325817b..2a3883f 100644
--- a/src/backends/reference/workloads/RefDebugWorkload.cpp
+++ b/src/backends/reference/workloads/RefDebugWorkload.cpp
@@ -44,6 +44,7 @@
     m_Callback = func;
 }
 
+template class RefDebugWorkload<DataType::Float16>;
 template class RefDebugWorkload<DataType::Float32>;
 template class RefDebugWorkload<DataType::QuantisedAsymm8>;
 template class RefDebugWorkload<DataType::QuantisedSymm16>;
diff --git a/src/backends/reference/workloads/RefDebugWorkload.hpp b/src/backends/reference/workloads/RefDebugWorkload.hpp
index 6a1fceb..0964515 100644
--- a/src/backends/reference/workloads/RefDebugWorkload.hpp
+++ b/src/backends/reference/workloads/RefDebugWorkload.hpp
@@ -37,6 +37,7 @@
     DebugCallbackFunction m_Callback;
 };
 
+using RefDebugFloat16Workload = RefDebugWorkload<DataType::Float16>;
 using RefDebugFloat32Workload = RefDebugWorkload<DataType::Float32>;
 using RefDebugQAsymm8Workload = RefDebugWorkload<DataType::QuantisedAsymm8>;
 using RefDebugQSymm16Workload = RefDebugWorkload<DataType::QuantisedSymm16>;
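
A sketch of hooking the Float16 debug output at runtime rather than relying
on the default printout. It assumes the IRuntime::RegisterDebugCallback API
and the DebugCallbackFunction signature (LayerGuid, output slot index,
ITensorHandle*) from armnn/Types.hpp, plus the backend-internal
ITensorHandle::Map/Unmap accessors; the include paths and helper name are
illustrative:

    #include <armnn/ArmNN.hpp>
    #include <backendsCommon/ITensorHandle.hpp> // backend-internal; path depends on the build setup

    #include <Half.hpp>

    #include <iostream>

    // Illustrative helper: routes RefDebugFloat16Workload output into a callback
    // instead of the default std::cout printout from Debug<Half>.
    void RegisterFloat16DebugCallback(armnn::IRuntime& runtime, armnn::NetworkId networkId)
    {
        runtime.RegisterDebugCallback(networkId,
            [](armnn::LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* tensorHandle)
            {
                // For a Float16 tensor the mapped buffer holds armnn::Half values
                const auto* data = static_cast<const armnn::Half*>(tensorHandle->Map());
                std::cout << "guid=" << guid << " slot=" << slotIndex
                          << " first element=" << static_cast<float>(data[0]) << std::endl;
                tensorHandle->Unmap();
            });
    }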