IVGCVSW-4633 Add BF16 conversion support to Neon

 * Add NeonConvertBf16ToFp32Workload
 * Add NeonConvertFp32ToBf16Workload
 * Add BFloat16 type support to NeonConstantWorkload and NeonTensorHandle
 * Add ConvertBf16ToFp32Weight to convert BF16 weights to FP32 when a ConvertBf16ToFp32Layer is inserted
 * Unit tests

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: Id5b44a203add5e0c98c1ca4e2162115741b56644
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index c2da4da..a443721 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -146,6 +146,30 @@
     return noErrors;
 }
 
+template <typename LayerT>
+LayerT* ConvertBf16ToFp32Weight(Layer* l)
+{
+    LayerT* layer = boost::polymorphic_downcast<LayerT*>(l);
+    if ((layer->GetType() == LayerType::Convolution2d || layer->GetType() == LayerType::FullyConnected)
+         && layer->m_Weight)
+    {
+        const TensorInfo& info = layer->m_Weight->GetTensorInfo();
+
+        if (info.GetDataType() == DataType::BFloat16)
+        {
+            std::vector<float> newValues(info.GetNumElements());
+
+            armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(
+                layer->m_Weight->template GetTensor<armnn::BFloat16>(), info.GetNumElements(), newValues.data());
+
+            TensorInfo newInfo(info.GetShape(), DataType::Float32);
+            ConstTensor newInput(newInfo, newValues);
+            layer->m_Weight.reset(new ScopedCpuTensorHandle(newInput));
+        }
+    }
+    return layer;
+}
+
 OptimizationResult AttemptBackendAssignment(BackendSettings& backendSettings,
                                             Graph& graph,
                                             Layer* layer,
@@ -260,6 +284,14 @@
                 {
                     convertBf16ToFp32Layers =
                         InsertConvertBf16ToFp32LayersBefore(graph, *layer);
+                    if (layer->GetType() == LayerType::Convolution2d)
+                    {
+                        ConvertBf16ToFp32Weight<Convolution2dLayer>(layer);
+                    }
+                    else if (layer->GetType() == LayerType::FullyConnected)
+                    {
+                        ConvertBf16ToFp32Weight<FullyConnectedLayer>(layer);
+                    }
                 }
 
                 // Insert FP32 -> BF16 conversion layer after current layer