IVGCVSW-4633 Add BF16 conversion support to Neon
* Add NeonConvertBf16ToFp32Workload
* Add NeonConvertFp32ToBf16Workload
* Add BFloat16 type support to NeonConstantWorkload and NeonTensorHandle
* Add ConvertBf16ToFp32Weight when ConvertBf16ToFp32Layer is added
* Unit tests
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: Id5b44a203add5e0c98c1ca4e2162115741b56644
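
For context, here is a minimal standalone sketch (not taken from the ArmNN sources) of the arithmetic the new conversion workloads perform. BFloat16 is the upper 16 bits of an IEEE-754 binary32 value, so widening to FP32 is a 16-bit left shift of the bit pattern, and narrowing from FP32 can be done by keeping the top 16 bits with round-to-nearest-even:

```cpp
#include <cstdint>
#include <cstring>
#include <cstdio>

// BF16 -> FP32: place the 16 BF16 bits in the high half of a 32-bit pattern.
float Bf16ToFp32(uint16_t bf16Bits)
{
    uint32_t fp32Bits = static_cast<uint32_t>(bf16Bits) << 16;
    float result;
    std::memcpy(&result, &fp32Bits, sizeof(result)); // bit-exact reinterpretation
    return result;
}

// FP32 -> BF16: keep the top 16 bits, rounding to nearest even.
// (NaN payloads and overflow at the rounding step are not specially handled
// in this sketch.)
uint16_t Fp32ToBf16(float value)
{
    uint32_t fp32Bits;
    std::memcpy(&fp32Bits, &value, sizeof(fp32Bits));
    uint32_t lsb = (fp32Bits >> 16) & 1u;        // lowest bit that survives truncation
    uint32_t rounded = fp32Bits + 0x7FFFu + lsb; // round to nearest, ties to even
    return static_cast<uint16_t>(rounded >> 16);
}

int main()
{
    float original = 1.5f;
    uint16_t bf16 = Fp32ToBf16(original);
    float roundTripped = Bf16ToFp32(bf16);
    std::printf("%f -> 0x%04X -> %f\n", original, bf16, roundTripped);
    return 0;
}
```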
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index c01a178..44e84fb 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -259,6 +259,16 @@
&TrueFunc<>);
}
+bool NeonLayerSupport::IsConvertBf16ToFp32Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ armnn::IgnoreUnused(input);
+ armnn::IgnoreUnused(output);
+ armnn::IgnoreUnused(reasonIfUnsupported);
+ return true;
+}
+
bool NeonLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
@@ -269,6 +279,16 @@
return true;
}
+bool NeonLayerSupport::IsConvertFp32ToBf16Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ armnn::IgnoreUnused(input);
+ armnn::IgnoreUnused(output);
+ armnn::IgnoreUnused(reasonIfUnsupported);
+ return true;
+}
+
bool NeonLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const