IVGCVSW-4633 Add BF16 conversion support to Neon

 * Add NeonConvertBf16ToFp32Workload
 * Add NeonConvertFp32ToBf16Workload
 * Add BFloat16 type support to NeonConstantWorkload and NeonTensorHandle
 * Add ConvertBf16ToFp32Weight, applied when a ConvertBf16ToFp32Layer is inserted
 * Add unit tests

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: Id5b44a203add5e0c98c1ca4e2162115741b56644
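
For context (not part of this patch): widening BFloat16 to Float32 is lossless,
because a BFloat16 value is simply the upper 16 bits (sign, 8-bit exponent,
7-bit mantissa) of an IEEE 754 binary32 value. Below is a minimal standalone
sketch, with an illustrative function name, of the bit-level step the workloads
listed above perform elementwise:

    #include <cstdint>
    #include <cstring>

    // Widen one BFloat16 (stored as uint16_t) to float: move the 16 stored bits
    // into the high half of a 32-bit pattern; the low mantissa bits become zero.
    float Bf16ToFp32(uint16_t bf16)
    {
        const uint32_t bits = static_cast<uint32_t>(bf16) << 16;
        float out;
        std::memcpy(&out, &bits, sizeof(out)); // bit-cast without breaking aliasing rules
        return out;
    }

For example, Bf16ToFp32(0x3F80) yields 1.0f. The Fp32-to-Bf16 direction is the
lossy one, since the low 16 mantissa bits must be dropped or rounded away.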
diff --git a/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.hpp b/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.hpp
new file mode 100644
index 0000000..0969088
--- /dev/null
+++ b/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.hpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <neon/workloads/NeonWorkloadUtils.hpp>
+
+#include <utility>
+#include <vector>
+
+namespace armnn
+{
+
+class NeonConvertBf16ToFp32Workload : public BFloat16ToFloat32Workload<ConvertBf16ToFp32QueueDescriptor>
+{
+public:
+    NeonConvertBf16ToFp32Workload(const ConvertBf16ToFp32QueueDescriptor& descriptor, const WorkloadInfo& info);
+    virtual void Execute() const override;
+
+private:
+    using TensorHandlePair = std::pair<const ITensorHandle*, ITensorHandle*>;
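+    // (input, output) tensor handle pairs; Execute() widens each BFloat16 input into its Float32 output.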
+    std::vector<TensorHandlePair> m_TensorHandlePairs;
+};
+
+} //namespace armnn
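
The hunks for the matching NeonConvertBf16ToFp32Workload.cpp are not shown in this
excerpt. A plausible sketch of that file, assuming it mirrors the existing
NeonConvertFp16ToFp32Workload and reuses the Arm NN helpers GatherTensorHandlePairs,
CopyTensorContentsGeneric and armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32
(assumptions based on the sibling FP16 workloads, not confirmed by the diff above):

    // Sketch only: structure mirrors NeonConvertFp16ToFp32Workload.cpp;
    // helper names are assumptions, not taken from the patch shown above.
    #include "NeonConvertBf16ToFp32Workload.hpp"

    #include <armnnUtils/FloatingPointConverter.hpp>
    #include <BFloat16.hpp>
    #include <backendsCommon/WorkloadUtils.hpp>

    namespace armnn
    {

    NeonConvertBf16ToFp32Workload::NeonConvertBf16ToFp32Workload(
        const ConvertBf16ToFp32QueueDescriptor& descriptor, const WorkloadInfo& info)
        : BFloat16ToFloat32Workload<ConvertBf16ToFp32QueueDescriptor>(descriptor, info)
    {
        this->m_Data.ValidateInputsOutputs("NeonConvertBf16ToFp32Workload", 1, 1);
        GatherTensorHandlePairs(descriptor, m_TensorHandlePairs);
    }

    void NeonConvertBf16ToFp32Workload::Execute() const
    {
        ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonConvertBf16ToFp32Workload_Execute");

        // Convert a contiguous span of BFloat16 bytes into Float32 bytes.
        auto convertFunc = [](uint8_t* dst, const uint8_t* src, size_t size)
        {
            auto input  = reinterpret_cast<const BFloat16*>(src);
            auto output = reinterpret_cast<float*>(dst);
            size_t numElements = size / 2; // 2 bytes per BFloat16 element
            armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(input, numElements, output);
        };

        for (const auto& pair : m_TensorHandlePairs)
        {
            CopyTensorContentsGeneric(pair.first, pair.second, convertFunc);
        }
    }

    } // namespace armnn

Keeping the conversion inside a lambda handed to CopyTensorContentsGeneric keeps the
workload shape-agnostic: the helper walks the tensor layout and passes contiguous byte
spans, so the lambda only needs to reinterpret and widen them.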