IVGCVSW-4515 Add ConvertBf16ToFp32Layer and Ref workload support
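
The new RefConvertBf16ToFp32Workload reads BFloat16 input, writes
Float32 output and delegates the per-element conversion to
armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32. As a
rough sketch of what that widening amounts to (illustration only, not
the library implementation; the helper name Bf16BitsToFloat32 is
invented here): BFloat16 stores the upper 16 bits of an IEEE-754
binary32 value, so each element widens by shifting its bits into the
top half of a 32-bit pattern.

    #include <cstdint>
    #include <cstring>

    // Sketch only: widen one BFloat16 bit pattern to float32. BFloat16
    // keeps the sign, exponent and top 7 mantissa bits of binary32, so
    // widening is a shift into the high half of the 32-bit representation.
    static float Bf16BitsToFloat32(uint16_t bf16Bits)
    {
        uint32_t bits = static_cast<uint32_t>(bf16Bits) << 16;
        float result;
        std::memcpy(&result, &bits, sizeof(result));
        return result;
    }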

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: Ida6d7e1d2c9abe0618f8b711bab9d62c011090d6
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index b2d8938..86764d8 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -69,6 +69,8 @@
     RefConcatWorkload.hpp
     RefConstantWorkload.cpp
     RefConstantWorkload.hpp
+    RefConvertBf16ToFp32Workload.cpp
+    RefConvertBf16ToFp32Workload.hpp
     RefConvertFp16ToFp32Workload.cpp
     RefConvertFp16ToFp32Workload.hpp
     RefConvertFp32ToFp16Workload.cpp
diff --git a/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.cpp b/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.cpp
new file mode 100644
index 0000000..c4b5416
--- /dev/null
+++ b/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.cpp
@@ -0,0 +1,27 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefConvertBf16ToFp32Workload.hpp"
+#include "RefWorkloadUtils.hpp"
+
+#include <armnnUtils/FloatingPointConverter.hpp>
+
+#include <BFloat16.hpp>
+
+namespace armnn
+{
+
+void RefConvertBf16ToFp32Workload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConvertBf16ToFp32Workload_Execute");
+
+    const BFloat16* const input = GetInputTensorDataBFloat16(0, m_Data);
+    float* const output = GetOutputTensorDataFloat(0, m_Data);
+
+    unsigned int numElements = GetTensorInfo(m_Data.m_Inputs[0]).GetNumElements();
+    armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(input, numElements, output);
+}
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.hpp b/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.hpp
new file mode 100644
index 0000000..87cdc3e
--- /dev/null
+++ b/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+class RefConvertBf16ToFp32Workload : public BFloat16ToFloat32Workload<ConvertBf16ToFp32QueueDescriptor>
+{
+public:
+    using BFloat16ToFloat32Workload<ConvertBf16ToFp32QueueDescriptor>::BFloat16ToFloat32Workload;
+    virtual void Execute() const override;
+};
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefWorkloadUtils.hpp b/src/backends/reference/workloads/RefWorkloadUtils.hpp
index c3260c8..6971314 100644
--- a/src/backends/reference/workloads/RefWorkloadUtils.hpp
+++ b/src/backends/reference/workloads/RefWorkloadUtils.hpp
@@ -12,6 +12,7 @@
 
 #include <reference/RefTensorHandle.hpp>
 
+#include <BFloat16.hpp>
 #include <Half.hpp>
 #include <boost/polymorphic_cast.hpp>
 
@@ -68,6 +69,12 @@
     return GetOutputTensorData<Half>(idx, data);
 }
 
+template <typename PayloadType>
+const BFloat16* GetInputTensorDataBFloat16(unsigned int idx, const PayloadType& data)
+{
+    return GetInputTensorData<BFloat16>(idx, data);
+}
+
 ////////////////////////////////////////////
 /// u8 helpers
 ////////////////////////////////////////////
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index a0558ff..37d79f0 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -22,6 +22,7 @@
 #include "RefConvolution2dWorkload.hpp"
 #include "RefConstantWorkload.hpp"
 #include "RefConcatWorkload.hpp"
+#include "RefConvertBf16ToFp32Workload.hpp"
 #include "RefConvertFp16ToFp32Workload.hpp"
 #include "RefConvertFp32ToFp16Workload.hpp"
 #include "RefDebugWorkload.hpp"