IVGCVSW-7214 Disable BF16-Turbo-Mode and remove conversion layers

 - Remove Bf16ToFp32 Conversion Layer
 - Remove Fp32ToBf16 Conversion Layer
 - Remove Bf16 Conversion tests
 * Throw exception if the m_ReduceFp32ToBf16 optimizer option is set to true
 * Add code comments explaining that fast math must be enabled in order to use Bf16
 * Update docs to inform users that fast math must be enabled for Bf16 (see the usage sketch below)

 ExecuteNetwork Changes
 * Require bf16_turbo_mode to also have fast_math_enabled set to true
 - Remove setting of the m_ReduceFp32ToBf16 optimizer option
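
 For reference, a minimal sketch of how Bf16 acceleration is now requested through the Fast Math
 backend option instead of m_ReduceFp32ToBf16. This is not part of the patch; it assumes an
 already-built armnn::INetworkPtr named "network", everything else uses the existing Arm NN API:

     #include <armnn/ArmNN.hpp>

     // Create a runtime and optimizer options. m_ReduceFp32ToBf16 is left at its default
     // (false), since setting it to true now throws InvalidArgumentException.
     armnn::IRuntimePtr runtime = armnn::IRuntime::Create(armnn::IRuntime::CreationOptions());
     armnn::OptimizerOptions optimizerOptions;

     // "FastMathEnabled" is the existing CpuAcc/GpuAcc backend option; with fast math enabled
     // the backend may select Bf16 kernels where beneficial.
     armnn::BackendOptions cpuAccOptions("CpuAcc", { { "FastMathEnabled", true } });
     optimizerOptions.m_ModelOptions.push_back(cpuAccOptions);

     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*network,
                                                          { armnn::Compute::CpuAcc },
                                                          runtime->GetDeviceSpec(),
                                                          optimizerOptions);

 For the ExecuteNetwork tool this corresponds to passing fast_math_enabled together with
 bf16_turbo_mode, as required by the change above.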

Signed-off-by: Ryan OShea <ryan.oshea3@arm.com>
Change-Id: Ibaa6da9d29c96a1ce32ff5196b0847fde9f04a1c
diff --git a/Android.mk b/Android.mk
index 426a628..e9f70c9 100644
--- a/Android.mk
+++ b/Android.mk
@@ -218,9 +218,7 @@
         src/armnn/layers/ConstantLayer.cpp \
         src/armnn/layers/Convolution2dLayer.cpp \
         src/armnn/layers/Convolution3dLayer.cpp \
-        src/armnn/layers/ConvertBf16ToFp32Layer.cpp \
         src/armnn/layers/ConvertFp16ToFp32Layer.cpp \
-        src/armnn/layers/ConvertFp32ToBf16Layer.cpp \
         src/armnn/layers/ConvertFp32ToFp16Layer.cpp \
         src/armnn/layers/DebugLayer.cpp \
         src/armnn/layers/DepthToSpaceLayer.cpp \
@@ -441,10 +439,8 @@
         src/armnn/test/ModelAccuracyCheckerTest.cpp \
         src/armnn/test/NetworkTests.cpp \
         src/armnn/test/ObservableTest.cpp \
-        src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp \
         src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp \
         src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp \
-        src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp \
         src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp \
         src/armnn/test/optimizations/FuseActivationTests.cpp \
         src/armnn/test/optimizations/InsertDebugLayerTests.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index ff05fb3..76fb958 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -207,12 +207,8 @@
     src/armnn/layers/Convolution2dLayer.cpp
     src/armnn/layers/Convolution3dLayer.hpp
     src/armnn/layers/Convolution3dLayer.cpp
-    src/armnn/layers/ConvertBf16ToFp32Layer.cpp
-    src/armnn/layers/ConvertBf16ToFp32Layer.hpp
     src/armnn/layers/ConvertFp16ToFp32Layer.hpp
     src/armnn/layers/ConvertFp16ToFp32Layer.cpp
-    src/armnn/layers/ConvertFp32ToBf16Layer.hpp
-    src/armnn/layers/ConvertFp32ToBf16Layer.cpp
     src/armnn/layers/ConvertFp32ToFp16Layer.hpp
     src/armnn/layers/ConvertFp32ToFp16Layer.cpp
     src/armnn/layers/DebugLayer.hpp
@@ -401,7 +397,6 @@
     src/armnn/optimizations/AddDebug.hpp
     src/armnn/optimizations/All.hpp
     src/armnn/optimizations/ConvertConstants.hpp
-    src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp
     src/armnn/optimizations/ConvertFp32NetworkToFp16.hpp
     src/armnn/optimizations/FoldPadIntoLayer2d.hpp
     src/armnn/optimizations/MovePermuteUp.hpp
@@ -581,16 +576,13 @@
         src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
         src/armnn/test/optimizations/ConvertConstDequantisationLayersToConstLayersTest.cpp
         src/armnn/test/optimizations/ConvertConstPermuteLayersToConstLayersTest.cpp
-        src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
         src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
         src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
         src/armnn/test/optimizations/FoldPadIntoQuantizedAveragePooling2DTests.cpp
         src/armnn/test/optimizations/FoldPadTests.cpp
-        src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
         src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
         src/armnn/test/optimizations/FuseActivationTests.cpp
         src/armnn/test/optimizations/FuseBatchNormTests.cpp
-        src/armnn/test/optimizations/FuseConvertF32BF16IntoConstLayerTests.cpp
         src/armnn/test/optimizations/InsertDebugLayerTests.cpp
         src/armnn/test/optimizations/MovePermuteUpTests.cpp
         src/armnn/test/optimizations/MoveTransposeUpTests.cpp
diff --git a/delegate/include/DelegateOptions.hpp b/delegate/include/DelegateOptions.hpp
index 2b0107e..4f157db 100644
--- a/delegate/include/DelegateOptions.hpp
+++ b/delegate/include/DelegateOptions.hpp
@@ -113,7 +113,7 @@
      *
      *    Option key: "reduce-fp32-to-bf16" \n
      *    Possible values: ["true"/"false"] \n
-     *    Description: Reduce Fp32 data to Bf16 for faster processing
+     *    Description: This option is currently ignored. Please enable Fast Math in the CpuAcc or GpuAcc backends.
      *
      *    Option key: "debug-data" \n
      *    Possible values: ["true"/"false"] \n
diff --git a/delegate/python/test/test_external_delegate.py b/delegate/python/test/test_external_delegate.py
index a8dd8e6..fe58d57 100644
--- a/delegate/python/test/test_external_delegate.py
+++ b/delegate/python/test/test_external_delegate.py
@@ -218,39 +218,6 @@
     assert "convert_fp32_to_fp16" in captured.out
     assert "convert_fp16_to_fp32" in captured.out
 
-def test_external_delegate_options_fp32_to_bf16(capfd, delegate_dir, test_data_folder):
-    # create armnn delegate with reduce-fp32-to-bf16 option
-    armnn_delegate = tflite.load_delegate(delegate_dir, options = {"backends": "CpuRef",
-                                                                   "debug-data": "1",
-                                                                   "reduce-fp32-to-bf16": "1"})
-
-    model_file_name = "conv2d.tflite"
-
-    inputShape = [ 1, 5, 5, 1 ]
-    outputShape = [ 1, 3, 3, 1 ]
-
-    inputValues = [ 1, 5, 2, 3, 5,
-                    8, 7, 3, 6, 3,
-                    3, 3, 9, 1, 9,
-                    4, 1, 8, 1, 3,
-                    6, 8, 1, 9, 2 ]
-
-    expectedResult = [ 28, 38, 29,
-                       96, 104, 53,
-                       31, 55, 24 ]
-
-    input = np.array(inputValues, dtype=np.float32).reshape(inputShape)
-    expected_output = np.array(expectedResult, dtype=np.float32).reshape(outputShape)
-
-    # run the inference
-    armnn_outputs = run_inference(test_data_folder, model_file_name, [input], [armnn_delegate])
-
-    # check results
-    compare_outputs(armnn_outputs, [expected_output])
-
-    captured = capfd.readouterr()
-    assert "convert_fp32_to_bf16" in captured.out
-
 def test_external_delegate_options_memory_import(delegate_dir, test_data_folder):
     # create armnn delegate with memory-import option
     armnn_delegate = tflite.load_delegate(delegate_dir, options = {"backends": "CpuAcc,CpuRef",
diff --git a/delegate/src/DelegateOptions.cpp b/delegate/src/DelegateOptions.cpp
index a55a579..bb1edab 100644
--- a/delegate/src/DelegateOptions.cpp
+++ b/delegate/src/DelegateOptions.cpp
@@ -146,11 +146,6 @@
         {
             optimizerOptions.m_ReduceFp32ToFp16 = armnn::stringUtils::StringToBool(options_values[i]);
         }
-            // Process reduce-fp32-to-bf16 option
-        else if (std::string(options_keys[i]) == std::string("reduce-fp32-to-bf16"))
-        {
-            optimizerOptions.m_ReduceFp32ToBf16 = armnn::stringUtils::StringToBool(options_values[i]);
-        }
             // Process debug-data
         else if (std::string(options_keys[i]) == std::string("debug-data"))
         {
diff --git a/delegate/src/test/DelegateOptionsTest.cpp b/delegate/src/test/DelegateOptionsTest.cpp
index 50d3f78..9832313 100644
--- a/delegate/src/test/DelegateOptionsTest.cpp
+++ b/delegate/src/test/DelegateOptionsTest.cpp
@@ -116,19 +116,6 @@
     CHECK(callback);
 }
 
-TEST_CASE ("ArmnnDelegateOptimizerOptionsReduceFp32ToBf16")
-{
-    std::stringstream ss;
-    {
-        StreamRedirector redirect(std::cout, ss.rdbuf());
-
-        ReduceFp32ToBf16TestImpl();
-    }
-
-    // ReduceFp32ToBf16 option is enabled
-    CHECK(ss.str().find("convert_fp32_to_bf16") != std::string::npos);
-}
-
 TEST_CASE ("ArmnnDelegateOptimizerOptionsImport")
 {
     std::vector<armnn::BackendId> backends = {  armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
diff --git a/delegate/src/test/DelegateOptionsTestHelper.hpp b/delegate/src/test/DelegateOptionsTestHelper.hpp
index 87bf0d6..7e147de 100644
--- a/delegate/src/test/DelegateOptionsTestHelper.hpp
+++ b/delegate/src/test/DelegateOptionsTestHelper.hpp
@@ -219,95 +219,6 @@
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
 }
 
-void ReduceFp32ToBf16TestImpl()
-{
-    using namespace tflite;
-    // Set input data
-    std::vector<int32_t> inputShape{ 1, 5, 5, 1 };
-    std::vector<int32_t> filterShape{ 1, 3, 3, 1 };
-    std::vector<int32_t> biasShape{ 1 };
-    std::vector<int32_t> outputShape{ 1, 3, 3, 1 };
-
-    std::vector<float> inputValues =
-        {
-            1, 5, 2, 3, 5,
-            8, 7, 3, 6, 3,
-            3, 3, 9, 1, 9,
-            4, 1, 8, 1, 3,
-            6, 8, 1, 9, 2
-        };
-
-    std::vector<float> filterValues =
-        {
-            4, 5, 6,
-            0, 0, 0,
-            3, 2, 1
-        };
-
-    std::vector<float> biasValues = { 5 };
-
-    std::vector<float> expectedResult =
-        {
-            28, 38, 29,
-            96, 104, 53,
-            31, 55, 24
-        };
-
-    tflite::Padding padding = Padding_SAME;
-
-    std::vector<char> modelBuffer;
-    modelBuffer = CreateConv2dTfLiteModel<float>(BuiltinOperator_CONV_2D,
-                                                 ::tflite::TensorType_FLOAT32,
-                                                 2,
-                                                 2,
-                                                 1,
-                                                 1,
-                                                 padding,
-                                                 ActivationFunctionType_NONE,
-                                                 inputShape,
-                                                 filterShape,
-                                                 biasShape,
-                                                 outputShape,
-                                                 filterValues,
-                                                 biasValues);
-
-
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-          (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
-    // Create the Armnn Delegate
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    std::vector<armnn::BackendOptions> backendOptions;
-
-    // Enable debug with BF16 enabled
-    armnn::OptimizerOptions optimizerOptions(false, true, true, false);
-
-    armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);
-
-    // Run EnqueueWorkload
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
-    armnnDelegate::CompareData(expectedResult.data(), armnnDelegateOutputData, expectedResult.size());
-    armnnDelegateInterpreter.reset(nullptr);
-}
-
 template <typename T>
 void DelegateOptionTest(tflite::TensorType tensorType,
                         const std::vector<armnn::BackendId>& backends,
diff --git a/docs/02_operator_list.dox b/docs/02_operator_list.dox
index 3a902c8..d9a3d2c 100644
--- a/docs/02_operator_list.dox
+++ b/docs/02_operator_list.dox
@@ -655,48 +655,6 @@
     <tr><td>All
     </table>
 <tr>
-  <td rowspan="3">ConvertBf16ToFp32Layer
-  <td rowspan="3" style="width:200px;"> Layer to convert BFloat16 tensor to Float32 tensor.
-  <td rowspan="3">
-      <ul>
-       <li>N/A
-      </ul>
-   <td>CpuRef
-     <td>
-         <ul>
-          <li>All
-         </ul>
-     <td>
-      <table>
-         <tr><th>
-         <tr><td>BFLOAT16
-         <tr><td>FLOAT32
-      </table>
-<tr>
-  <td>CpuAcc
-  <td>
-      <ul>
-       <li>All
-      </ul>
-  <td>
-    <table>
-         <tr><th>
-         <tr><td>BFLOAT16
-         <tr><td>FLOAT32
-    </table>
-<tr>
-  <td>GpuAcc
-  <td>
-      <ul>
-       <li>All
-      </ul>
-  <td>
-    <table>
-         <tr><th>
-         <tr><td>BFLOAT16
-         <tr><td>FLOAT32
-    </table>
-<tr>
   <td rowspan="3">ConvertFp16ToFp32Layer
   <td rowspan="3" style="width:200px;"> Layer to convert Float16 tensor to Float32 tensor.
   <td rowspan="3">
@@ -739,48 +697,6 @@
          <tr><td>FLOAT32
     </table>
 <tr>
-  <td rowspan="3">ConvertFp32ToBf16Layer
-  <td rowspan="3" style="width:200px;"> Layer to convert Float32 tensor to BFloat16 tensor.
-  <td rowspan="3">
-      <ul>
-       <li>N/A
-      </ul>
-   <td>CpuRef
-     <td>
-         <ul>
-          <li>All
-         </ul>
-     <td>
-      <table>
-         <tr><th>
-         <tr><td>BFLOAT16
-         <tr><td>FLOAT32
-      </table>
-<tr>
-  <td>CpuAcc
-  <td>
-      <ul>
-       <li>All
-      </ul>
-  <td>
-    <table>
-         <tr><th>
-         <tr><td>BFLOAT16
-         <tr><td>FLOAT32
-    </table>
-<tr>
-  <td>GpuAcc
-  <td>
-      <ul>
-       <li>All
-      </ul>
-  <td>
-    <table>
-         <tr><th>
-         <tr><td>BFLOAT16
-         <tr><td>FLOAT32
-    </table>
-<tr>
   <td rowspan="3">ConvertFp32ToFp16Layer
   <td rowspan="3" style="width:200px;"> Layer to convert Float32 tensor to Float16 tensor.
   <td rowspan="3">
diff --git a/docs/05_05_runtimeoptions.dox b/docs/05_05_runtimeoptions.dox
index 454d4af..b5888ee 100644
--- a/docs/05_05_runtimeoptions.dox
+++ b/docs/05_05_runtimeoptions.dox
@@ -81,7 +81,7 @@
 Arm NN Parameter | Delegate  | Support library | Values | Description
 :--------------- | :-------- | :-------------- | :----- | :----------
 reduceFp32ToFp16 | reduce-fp32-to-fp16 | (Not available) | ["true"/"false"] | Note This feature works best if all operators of the model are in Fp32. ArmNN will add conversion layers between layers that weren't in Fp32 in the first place or if the operator is not supported in Fp16. The overhead of these conversions can lead to a slower overall performance if too many conversions are required. 
-reduceFp32ToBf16 | reduce-fp32-to-bf16 | (Not available) | ["true"/"false"] | This feature works best if all operators of the model are in Fp32. ArmNN will add conversion layers between layers that weren't in Fp32 in the first place or if the operator is not supported in Bf16. The overhead of these conversions can lead to a slower overall performance if too many conversions are required.
+reduceFp32ToBf16 | reduce-fp32-to-bf16 | (Not available) | ["true"/"false"] | This feature has been replaced by enabling Fast Math in the compute library backend options. This is currently a placeholder option.
 debug            | debug-data | (Not available) | ["true"/"false"] | If the debug flag is set a DebugLayer is inserted after each layer. The action of each debug layer is backend specific.
 importEnabled | memory-import | (Not available) | ["true"/"false"] | Instructs the optimizer that this model will be importing it's input tensors. This value must match the MemorySource set for input in INetworkProperties.
 exportEnabled | (Not available) | (Not available) | ["true"/"false"] | Instructs the optimizer that this model will be exporting it's output tensors. This value must match the MemorySource set for output in INetworkProperties.
diff --git a/include/armnn/BackendHelper.hpp b/include/armnn/BackendHelper.hpp
index f78b4f8..25d719a 100644
--- a/include/armnn/BackendHelper.hpp
+++ b/include/armnn/BackendHelper.hpp
@@ -86,14 +86,6 @@
     bool IsConstantSupported(const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
-    bool IsConvertBf16ToFp32Supported(const TensorInfo& input,
-                                      const TensorInfo& output,
-                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-    bool IsConvertFp32ToBf16Supported(const TensorInfo& input,
-                                      const TensorInfo& output,
-                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
     bool IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported = EmptyOptional());
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 687f2c3..2bb9ad9 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -152,10 +152,6 @@
         , m_ExportEnabled(exportEnabled)
         , m_AllowExpandedDims(false)
     {
-        if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
-        {
-            throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time.");
-        }
     }
 
     OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
@@ -173,10 +169,6 @@
         , m_ExportEnabled(exportEnabled)
         , m_AllowExpandedDims(allowExpandedDims)
     {
-        if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
-        {
-            throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time.");
-        }
     }
 
     const std::string ToString() const
@@ -216,35 +208,32 @@
     ///       required.
     bool m_ReduceFp32ToFp16;
 
-    // Add debug data for easier troubleshooting
+    /// Add debug data for easier troubleshooting
     bool m_Debug;
 
-    // Pass debug data to separate output files for easier troubleshooting
+    /// Pass debug data to separate output files for easier troubleshooting
     bool m_DebugToFile;
 
-    /// Reduces all Fp32 operators in the model to Bf16 for faster processing.
-    /// @Note This feature works best if all operators of the model are in Fp32. ArmNN will add conversion layers
-    ///       between layers that weren't in Fp32 in the first place or if the operator is not supported in Bf16.
-    ///       The overhead of these conversions can lead to a slower overall performance if too many conversions are
-    ///       required.
+    /// @Note This feature has been replaced by enabling Fast Math in the compute library backend options.
+    /// This is currently a placeholder option.
     bool m_ReduceFp32ToBf16;
 
-    // Infer output size when not available
+    /// Infer output size when not available
     ShapeInferenceMethod m_shapeInferenceMethod;
 
-    // Enable Import
+    /// Enable Import
     bool m_ImportEnabled;
 
-    // Enable Model Options
+    /// Enable Model Options
     ModelOptions m_ModelOptions;
 
-    // Enable profiling dump of the optimizer phase
+    /// Enable profiling dump of the optimizer phase
     bool m_ProfilingEnabled;
 
-    // Enable Export
+    /// Enable Export
     bool m_ExportEnabled;
 
-    // When calculating tensor sizes dimensions of size == 1 will be ignored
+    /// When calculating tensor sizes, dimensions of size == 1 will be ignored
     bool m_AllowExpandedDims;
 };
 
@@ -782,8 +771,8 @@
 
     void ExecuteStrategy(IStrategy& strategy) const;
 
-    // Creates a copy of the IOptimizedNetwork. The IOptimizedNetwork will not be reoptimized,
-    // the provided ModelOptions will only be used when creating a LoadedNetwork.
+    /// Creates a copy of the IOptimizedNetwork. The IOptimizedNetwork will not be reoptimized,
+    /// the provided ModelOptions will only be used when creating a LoadedNetwork.
     IOptimizedNetwork(const IOptimizedNetwork& other, const ModelOptions& modelOptions);
     IOptimizedNetwork(std::unique_ptr<Graph> graph);
     IOptimizedNetwork(std::unique_ptr<OptimizedNetworkImpl> impl);
diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index 98229df..eebefa8 100644
--- a/include/armnn/Types.hpp
+++ b/include/armnn/Types.hpp
@@ -394,9 +394,7 @@
     X(Comparison) \
     X(Concat) \
     X(Constant) \
-    X(ConvertBf16ToFp32) \
     X(ConvertFp16ToFp32) \
-    X(ConvertFp32ToBf16) \
     X(ConvertFp32ToFp16) \
     X(Convolution2d) \
     X(Debug) \
diff --git a/include/armnn/backends/ILayerSupport.hpp b/include/armnn/backends/ILayerSupport.hpp
index b7f5f04..92102c1 100644
--- a/include/armnn/backends/ILayerSupport.hpp
+++ b/include/armnn/backends/ILayerSupport.hpp
@@ -109,18 +109,6 @@
 
     ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. "
                                       "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08")
-    virtual bool IsConvertBf16ToFp32Supported(const TensorInfo& input,
-                                              const TensorInfo& output,
-                                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
-
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. "
-                                      "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08")
-    virtual bool IsConvertFp32ToBf16Supported(const TensorInfo& input,
-                                              const TensorInfo& output,
-                                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
-
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. "
-                                      "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08")
     virtual bool IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
diff --git a/include/armnn/backends/WorkloadData.hpp b/include/armnn/backends/WorkloadData.hpp
index bd2b3ec..4fbb6d4 100644
--- a/include/armnn/backends/WorkloadData.hpp
+++ b/include/armnn/backends/WorkloadData.hpp
@@ -471,16 +471,6 @@
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
-struct ConvertBf16ToFp32QueueDescriptor : QueueDescriptor
-{
-    void Validate(const WorkloadInfo& workloadInfo) const;
-};
-
-struct ConvertFp32ToBf16QueueDescriptor : QueueDescriptor
-{
-    void Validate(const WorkloadInfo& workloadInfo) const;
-};
-
 struct ConvertFp16ToFp32QueueDescriptor : QueueDescriptor
 {
     void Validate(const WorkloadInfo& workloadInfo) const;
diff --git a/include/armnn/backends/WorkloadFactory.hpp b/include/armnn/backends/WorkloadFactory.hpp
index 4ccf1e2..e69743d 100644
--- a/include/armnn/backends/WorkloadFactory.hpp
+++ b/include/armnn/backends/WorkloadFactory.hpp
@@ -126,21 +126,11 @@
 
     ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
     "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08")
-    virtual std::unique_ptr<IWorkload> CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& descriptor,
-                                                               const WorkloadInfo& info) const;
-
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
-    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08")
     virtual std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const;
 
     ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
     "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08")
-    virtual std::unique_ptr<IWorkload> CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& descriptor,
-                                                               const WorkloadInfo& info) const;
-
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
-    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08")
     virtual std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const;
 
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
index f91bccc..c9eef86 100644
--- a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
@@ -25,8 +25,8 @@
 
 Contains:
     m_debug (bool): Add debug data for easier troubleshooting.
-    m_ReduceFp32ToBf16 (bool): Reduces Fp32 network to BFloat16 (Bf16) for faster processing. Layers
-                               that can not be reduced will be left in Fp32.
+    m_ReduceFp32ToBf16 (bool): This feature has been replaced by enabling Fast Math in compute library backend options.
+                               This is currently a placeholder option.
     m_ReduceFp32ToFp16 (bool): Reduces Fp32 network to Fp16 for faster processing. Layers
                                that can not be reduced will be left in Fp32.
     m_ImportEnabled (bool):    Enable memory import of inport tensors.
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index 6638709..ff899d4 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -307,34 +307,6 @@
                                             reasonIfUnsupported);
 }
 
-bool LayerSupportHandle::IsConvertBf16ToFp32Supported(const TensorInfo& input,
-                                                      const TensorInfo& output,
-                                                      Optional<std::string&> reasonIfUnsupported)
-{
-    TensorInfos infos{input, output};
-
-    return m_LayerSupport->IsLayerSupported(LayerType::ConvertBf16ToFp32,
-                                            infos,
-                                            BaseDescriptor(),
-                                            EmptyOptional(),
-                                            EmptyOptional(),
-                                            reasonIfUnsupported);
-}
-
-bool LayerSupportHandle::IsConvertFp32ToBf16Supported(const TensorInfo& input,
-                                                      const TensorInfo& output,
-                                                      Optional<std::string&> reasonIfUnsupported)
-{
-    TensorInfos infos{input, output};
-
-    return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp32ToBf16,
-                                            infos,
-                                            BaseDescriptor(),
-                                            EmptyOptional(),
-                                            EmptyOptional(),
-                                            reasonIfUnsupported);
-}
-
 bool LayerSupportHandle::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       Optional<std::string&> reasonIfUnsupported)
diff --git a/src/armnn/ILayerSupport.cpp b/src/armnn/ILayerSupport.cpp
index 8099782..3ef367e 100644
--- a/src/armnn/ILayerSupport.cpp
+++ b/src/armnn/ILayerSupport.cpp
@@ -77,18 +77,10 @@
         case LayerType::Constant:
             return IsConstantSupported(infos[0],
                                        reasonIfUnsupported);
-        case LayerType::ConvertBf16ToFp32:
-            return IsConvertBf16ToFp32Supported(infos[0],
-                                                infos[1],
-                                                reasonIfUnsupported);
         case LayerType::ConvertFp16ToFp32:
             return IsConvertFp16ToFp32Supported(infos[0],
                                                 infos[1],
                                                 reasonIfUnsupported);
-        case LayerType::ConvertFp32ToBf16:
-            return IsConvertFp32ToBf16Supported(infos[0],
-                                                infos[1],
-                                                reasonIfUnsupported);
         case LayerType::ConvertFp32ToFp16:
             return IsConvertFp32ToFp16Supported(infos[0],
                                                 infos[1],
@@ -634,22 +626,6 @@
     return false;
 }
 
-bool ILayerSupport::IsConvertBf16ToFp32Supported(const TensorInfo& input,
-                                                 const TensorInfo& output,
-                                                 Optional<std::string&> reasonIfUnsupported) const
-{
-    IgnoreUnused(input, output, reasonIfUnsupported);
-    return false;
-}
-
-bool ILayerSupport::IsConvertFp32ToBf16Supported(const TensorInfo& input,
-                                                 const TensorInfo& output,
-                                                 Optional<std::string&> reasonIfUnsupported) const
-{
-    IgnoreUnused(input, output, reasonIfUnsupported);
-    return false;
-}
-
 bool ILayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index acac1f9..43862d5 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -17,9 +17,7 @@
 #include "layers/ComparisonLayer.hpp"
 #include "layers/ConcatLayer.hpp"
 #include "layers/ConstantLayer.hpp"
-#include "layers/ConvertBf16ToFp32Layer.hpp"
 #include "layers/ConvertFp16ToFp32Layer.hpp"
-#include "layers/ConvertFp32ToBf16Layer.hpp"
 #include "layers/ConvertFp32ToFp16Layer.hpp"
 #include "layers/Convolution2dLayer.hpp"
 #include "layers/Convolution3dLayer.hpp"
@@ -119,9 +117,7 @@
 DECLARE_LAYER(Comparison)
 DECLARE_LAYER(Concat)
 DECLARE_LAYER(Constant)
-DECLARE_LAYER(ConvertBf16ToFp32)
 DECLARE_LAYER(ConvertFp16ToFp32)
-DECLARE_LAYER(ConvertFp32ToBf16)
 DECLARE_LAYER(ConvertFp32ToFp16)
 DECLARE_LAYER(Convolution2d)
 DECLARE_LAYER(Convolution3d)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 9d00a69..6d3058c 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -604,30 +604,6 @@
     return noErrors;
 }
 
-template <typename LayerT>
-LayerT* ConvertBf16ToFp32Weight(Layer* l)
-{
-    LayerT* layer = PolymorphicDowncast<LayerT*>(l);
-    if ((layer->GetType() == LayerType::Convolution2d || layer->GetType() == LayerType::FullyConnected)
-         && layer->m_Weight)
-    {
-        const TensorInfo& info = layer->m_Weight->GetTensorInfo();
-
-        if (info.GetDataType() == DataType::BFloat16)
-        {
-            std::vector<float> newValues(info.GetNumElements());
-
-            armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(
-                layer->m_Weight->template GetConstTensor<armnn::BFloat16>(), info.GetNumElements(), newValues.data());
-
-            TensorInfo newInfo(info.GetShape(), DataType::Float32);
-            ConstTensor newInput(newInfo, newValues);
-            layer->m_Weight.reset(new ScopedTensorHandle(newInput));
-        }
-    }
-    return layer;
-}
-
 OptimizationResult AttemptBackendAssignment(BackendSettings& backendSettings,
                                             Graph& graph,
                                             Layer* layer,
@@ -772,98 +748,6 @@
                 return result;
             }
         }
-        else if (dataTypeIn == DataType::BFloat16 || dataTypeOut == DataType::BFloat16)
-        {
-            const auto layerType = layer->GetType();
-            if (IWorkloadFactory::IsLayerSupported(*layer, DataType::Float32, reasonIfUnsupported)
-                && layerType != LayerType::ConvertFp32ToBf16
-                && layerType != LayerType::ConvertBf16ToFp32)
-            {
-                bool revertConstantWeightsConversion = RevertConstantWeightsToFP32(layer);
-
-                // Insert BF16 -> FP32 conversion layer before current layer.
-                // Unless we have reverted Constant Weights Type above.
-                std::vector<ConvertBf16ToFp32Layer*> convertBf16ToFp32Layers;
-                if (dataTypeIn == DataType::BFloat16 && dataTypeOut != DataType::BFloat16
-                    && !revertConstantWeightsConversion)
-                {
-                    convertBf16ToFp32Layers =
-                        InsertConvertBf16ToFp32LayersBefore(graph, *layer);
-                    if (layer->GetType() == LayerType::Convolution2d)
-                    {
-                        ConvertBf16ToFp32Weight<Convolution2dLayer>(layer);
-                    }
-                    else if (layer->GetType() == LayerType::FullyConnected)
-                    {
-                        ConvertBf16ToFp32Weight<FullyConnectedLayer>(layer);
-                    }
-                }
-
-                // Insert FP32 -> BF16 conversion layer after current layer
-                std::vector<ConvertFp32ToBf16Layer*> convertFp32ToBf16Layers;
-                if (dataTypeOut == DataType::BFloat16)
-                {
-                    convertFp32ToBf16Layers =
-                        InsertConvertFp32ToBf16LayersAfter(graph, *layer);
-                }
-
-                // Assign a supported backend to the newly introduced conversion layers
-                auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
-                    {
-                        bool supportedBackendFound = false;
-                        std::string reasonIfUnsupported;
-
-                        // Try preferred backend first
-                        layer->SetBackendId(preferredBackend);
-                        if (IWorkloadFactory::IsLayerSupported(*layer,
-                                                               EmptyOptional(),
-                                                               reasonIfUnsupported))
-                        {
-                            supportedBackendFound = true;
-                        }
-                        else
-                        {
-                            for (const auto& backend : availablePreferredBackends)
-                            {
-                                // Skip preferred backend (we already determined that it is not supported)
-                                if (backend == preferredBackend)
-                                {
-                                    continue;
-                                }
-
-                                layer->SetBackendId(backend);
-                                if (IWorkloadFactory::IsLayerSupported(*layer,
-                                                                       EmptyOptional(),
-                                                                       reasonIfUnsupported))
-                                {
-                                    supportedBackendFound = true;
-                                    break;
-                                }
-                            }
-                        }
-
-                        return supportedBackendFound;
-                    };
-
-                for (ConvertBf16ToFp32Layer* convertLayer : convertBf16ToFp32Layers)
-                {
-                    if (!AssignFirstSupportedBackend(convertLayer, backend))
-                    {
-                        return ReturnError(convertLayer);
-                    }
-                }
-
-                for (ConvertFp32ToBf16Layer* convertLayer : convertFp32ToBf16Layers)
-                {
-                    if (!AssignFirstSupportedBackend(convertLayer, backend))
-                    {
-                        return ReturnError(convertLayer);
-                    }
-                }
-
-                return result;
-            }
-        }
 
         std::stringstream warningMsg;
         warningMsg << "Layer of type " << GetLayerTypeAsCString(layer->GetType())
@@ -1669,6 +1553,12 @@
         throw InvalidArgumentException("Invoked Optimize with no backends specified");
     }
 
+    if (options.m_ReduceFp32ToBf16)
+    {
+        throw InvalidArgumentException("BFloat16 optimization is currently ignored. In order to use Bf16 optimization, "
+                                       "please use the FastMathEnabled backend option for CpuAcc or GpuAcc.");
+    }
+
     if (options.m_ReduceFp32ToFp16 && options.m_ReduceFp32ToBf16)
     {
         throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time.");
@@ -1745,17 +1635,6 @@
         Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
     }
 
-    // If Fp32 to Bf16 optimization is set convert Fp32 network to Bf16
-    // Convert input of Convolution2d and FullyConnected from Fp32 to Bf16
-    // Only Constant weight of Convolution2d and FullyConnected are converted from Fp32 to Bf16
-    // Constant and Fp32ToBf16 layers will also be fused so conversion is no longer needed at inference time
-    if (options.m_ReduceFp32ToBf16)
-    {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_ReduceFp32ToBf16");
-        Optimizer::Pass(optGraph, MakeOptimizations(Fp32NetworkToBf16Converter()));
-        Optimizer::Pass(optGraph, MakeOptimizations(FuseConversionLayersIntoConstLayers()));
-    }
-
     // Initialize backend settings
     BackendSettings backendSettings(backendPreferences, deviceSpec);
     if (backendSettings.GetAvailablePreferredBackends().empty())
diff --git a/src/armnn/NetworkUtils.cpp b/src/armnn/NetworkUtils.cpp
index aaee4eb..1d46f02 100644
--- a/src/armnn/NetworkUtils.cpp
+++ b/src/armnn/NetworkUtils.cpp
@@ -5,8 +5,6 @@
 
 #include "NetworkUtils.hpp"
 
-#include <armnnUtils/FloatingPointConverter.hpp>
-#include <BFloat16.hpp>
 #include "SubgraphViewSelector.hpp"
 
 #include <armnn/Exceptions.hpp>
@@ -26,17 +24,6 @@
     outputSlot.SetTensorInfo(newTensorInfo);
 }
 
-void ChangeOutputBf16ToFp32(Layer& layer)
-{
-    for (auto&& outputSlot = layer.BeginOutputSlots(); outputSlot != layer.EndOutputSlots(); ++outputSlot)
-    {
-        if (outputSlot->GetTensorInfo().GetDataType() == DataType::BFloat16)
-        {
-            UpdateOutputSlotToFp32(*outputSlot);
-        }
-    }
-}
-
 void ChangeOutputFp16ToFp32(Layer& layer)
 {
     for (auto&& outputSlot = layer.BeginOutputSlots(); outputSlot != layer.EndOutputSlots(); ++outputSlot)
@@ -50,93 +37,6 @@
 
 } // anonymous namespace
 
-std::vector<ConvertBf16ToFp32Layer*> InsertConvertBf16ToFp32LayersBefore(Graph& graph,
-                                                                         Layer& layer,
-                                                                         bool expectCorrectInputType)
-{
-    std::vector<ConvertBf16ToFp32Layer*> convertLayers;
-    convertLayers.reserve(layer.GetNumInputSlots());
-
-    // Insert a ConvertBf16ToFp32Layer before each input slot
-    for (auto&& inputSlot = layer.BeginInputSlots(); inputSlot != layer.EndInputSlots(); ++inputSlot)
-    {
-        bool allowInsert = true;
-        if (expectCorrectInputType)
-        {
-            // Only insert ConvertBf16ToFp32Layer before BF16 input slots
-            OutputSlot* connectedOutputSlot = inputSlot->GetConnectedOutputSlot();
-            allowInsert =
-                connectedOutputSlot && connectedOutputSlot->GetTensorInfo().GetDataType() == DataType::BFloat16;
-        }
-
-        if (allowInsert)
-        {
-            const std::string name =
-                std::string("convert_bf16_to_fp32-" + std::to_string(inputSlot->GetSlotIndex()) + "-") +
-                layer.GetName();
-            ConvertBf16ToFp32Layer* convertLayer =
-                graph.InsertNewLayer<ConvertBf16ToFp32Layer>(*inputSlot, name.c_str());
-
-            TensorInfo convertInfo = convertLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
-            convertInfo.SetDataType(DataType::Float32);
-
-            convertLayer->GetOutputSlot().SetTensorInfo(convertInfo);
-
-            convertLayers.emplace_back(convertLayer);
-        }
-    }
-
-    return convertLayers;
-}
-
-std::vector<ConvertFp32ToBf16Layer*> InsertConvertFp32ToBf16LayersBefore(Graph& graph,
-                                                                         Layer& layer,
-                                                                         bool expectCorrectInputType)
-{
-    std::vector<ConvertFp32ToBf16Layer*> convertLayers;
-    convertLayers.reserve(layer.GetNumInputSlots());
-
-    // Insert a ConvertFp32ToBf16Layer before each input slot
-    for (auto&& inputSlot = layer.BeginInputSlots(); inputSlot != layer.EndInputSlots(); ++inputSlot)
-    {
-        bool allowInsert = true;
-
-        if ((layer.GetType() == LayerType::Convolution2d ||
-             layer.GetType() == LayerType::FullyConnected ||
-             layer.GetType() == LayerType::DepthwiseConvolution2d)
-                && inputSlot->GetSlotIndex() == 2)
-        {
-            // Refrain from reducing bias to Bf16
-            continue;
-        }
-        if (expectCorrectInputType)
-        {
-            // Only insert ConvertFp32ToBf16Layer before FP32 input slots
-            OutputSlot* connectedOutputSlot = inputSlot->GetConnectedOutputSlot();
-            allowInsert =
-                connectedOutputSlot && connectedOutputSlot->GetTensorInfo().GetDataType() == DataType::Float32;
-        }
-
-        if (allowInsert)
-        {
-            const std::string name =
-                std::string("convert_fp32_to_bf16-" + std::to_string(inputSlot->GetSlotIndex()) + "-") +
-                layer.GetName();
-            ConvertFp32ToBf16Layer* convertLayer =
-                graph.InsertNewLayer<ConvertFp32ToBf16Layer>(*inputSlot, name.c_str());
-
-            TensorInfo convertInfo = convertLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
-            convertInfo.SetDataType(DataType::BFloat16);
-
-            convertLayer->GetOutputSlot().SetTensorInfo(convertInfo);
-
-            convertLayers.emplace_back(convertLayer);
-        }
-    }
-
-    return convertLayers;
-}
-
 std::vector<ConvertFp16ToFp32Layer*> InsertConvertFp16ToFp32LayersBefore(Graph& graph,
                                                                          Layer& layer,
                                                                          bool expectCorrectInputType)
@@ -176,39 +76,6 @@
     return convertLayers;
 }
 
-std::vector<ConvertFp32ToBf16Layer*> InsertConvertFp32ToBf16LayersAfter(Graph& graph, Layer& layer)
-{
-    const unsigned int numOutputSlots = layer.GetNumOutputSlots();
-
-    std::vector<ConvertFp32ToBf16Layer*> convertLayers;
-    convertLayers.reserve(numOutputSlots);
-
-    // Update Bf16 output slots to FP32 on current layer
-    ChangeOutputBf16ToFp32(layer);
-
-    // Insert a ConvertFp32ToBf16Layer after each FP32 output slot
-    for (unsigned int slotIndex = 0u; slotIndex < numOutputSlots; ++slotIndex)
-    {
-        OutputSlot& outputSlot = layer.GetOutputSlot(slotIndex);
-        if(outputSlot.GetTensorInfo().GetDataType() == DataType::Float32)
-        {
-            const std::string name =
-                std::string("convert_fp32_to_bf16-" + std::to_string(slotIndex) + "-") + layer.GetName();
-            ConvertFp32ToBf16Layer* convertLayer =
-                graph.InsertNewLayer<ConvertFp32ToBf16Layer>(outputSlot, name.c_str());
-
-            TensorInfo convertInfo = convertLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
-            convertInfo.SetDataType(DataType::BFloat16);
-
-            convertLayer->GetOutputSlot().SetTensorInfo(convertInfo);
-
-            convertLayers.emplace_back(convertLayer);
-        }
-    }
-
-    return convertLayers;
-}
-
 std::vector<ConvertFp32ToFp16Layer*> InsertConvertFp32ToFp16LayersAfter(Graph& graph, Layer& layer)
 {
     const unsigned int numOutputSlots = layer.GetNumOutputSlots();
@@ -274,50 +141,4 @@
     return debugLayers;
 }
 
-bool RevertConstantWeightsToFP32(Layer* layer)
-{
-    if (layer->GetType() == LayerType::Convolution2d || layer->GetType() == LayerType::FullyConnected)
-    {
-        // Revert Weights on Constant Layer to FP32 so they can be accessed by Conv2d or FullyConnected
-        // This prevents a conversion layer being added in during backend assignment which blocks
-        // the RedirectMembersToConstantInputs backward compatibility workaround/optimization.
-        auto constantLayerInfo = layer->GetInputSlot(1).GetConnection()->GetTensorInfo();
-
-        if (constantLayerInfo.IsConstant() && constantLayerInfo.GetDataType() == DataType::BFloat16)
-        {
-            std::vector<float> newValues(constantLayerInfo.GetNumElements());
-
-            auto weightLayer = PolymorphicDowncast<ConstantLayer*>(
-                    &layer->GetInputSlot(1).GetConnection()->GetOwningIConnectableLayer());
-            armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(
-                    weightLayer->m_LayerOutput->GetConstTensor<BFloat16>(),
-                    constantLayerInfo.GetNumElements(),
-                    newValues.data());
-
-            TensorInfo newInfo(constantLayerInfo.GetShape(), DataType::Float32);
-            newInfo.SetConstant(true);
-            ConstTensor newInput(newInfo, newValues);
-            weightLayer->m_LayerOutput.reset(new ScopedTensorHandle(newInput));
-            weightLayer->GetOutputSlot(0).SetTensorInfo(newInfo);
-
-            // Connect Conv2d/FullyConnected to InputLayer directly leaving out
-            // the ConversionLayer to be cleaned up later
-            auto& conversionLayer = layer->GetInputSlot(0).GetConnection()->GetOwningIConnectableLayer();
-            auto actualInputOutputSlot = conversionLayer.GetInputSlot(0).GetConnection();
-
-            auto& conversionLayerOutputSlot =
-                    layer->GetInputSlot(0).GetConnection()->GetOwningIConnectableLayer().GetOutputSlot(0);
-            auto& conversionLayerInputSlot =
-                    layer->GetInputSlot(0).GetConnection()->GetOwningIConnectableLayer().GetInputSlot(0);
-            actualInputOutputSlot->Disconnect(conversionLayerInputSlot);
-            conversionLayerOutputSlot.Disconnect(layer->GetInputSlot(0));
-
-            actualInputOutputSlot->Connect(layer->GetInputSlot(0));
-
-            return true;
-        }
-    }
-    return false;
-}
-
 } // namespace armnn
diff --git a/src/armnn/NetworkUtils.hpp b/src/armnn/NetworkUtils.hpp
index 38e0aab..74e872c 100644
--- a/src/armnn/NetworkUtils.hpp
+++ b/src/armnn/NetworkUtils.hpp
@@ -11,16 +11,6 @@
 namespace armnn
 {
 
-std::vector<ConvertBf16ToFp32Layer*> InsertConvertBf16ToFp32LayersBefore(Graph& graph,
-                                                                         Layer& layer,
-                                                                         bool expectCorrectInputType = true);
-
-std::vector<ConvertFp32ToBf16Layer*> InsertConvertFp32ToBf16LayersBefore(Graph& graph,
-                                                                         Layer& layer,
-                                                                         bool expectCorrectInputType = true);
-
-std::vector<ConvertFp32ToBf16Layer*> InsertConvertFp32ToBf16LayersAfter(Graph& graph, Layer& layer);
-
 std::vector<ConvertFp16ToFp32Layer*> InsertConvertFp16ToFp32LayersBefore(Graph& graph,
                                                                          Layer& layer,
                                                                          bool expectCorrectInputType = true);
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
deleted file mode 100644
index a0958e3..0000000
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ConvertBf16ToFp32Layer.hpp"
-#include "LayerCloneBase.hpp"
-
-#include <armnn/TypesUtils.hpp>
-
-#include <armnn/backends/WorkloadData.hpp>
-#include <armnn/backends/WorkloadFactory.hpp>
-
-namespace armnn
-{
-
-ConvertBf16ToFp32Layer::ConvertBf16ToFp32Layer(const char* name)
-    : Layer(1, 1, LayerType::ConvertBf16ToFp32, name)
-{
-}
-
-std::unique_ptr<IWorkload> ConvertBf16ToFp32Layer::CreateWorkload(const IWorkloadFactory& factory) const
-{
-    ConvertBf16ToFp32QueueDescriptor descriptor;
-    SetAdditionalInfo(descriptor);
-
-    return factory.CreateWorkload(LayerType::ConvertBf16ToFp32, descriptor, PrepInfoAndDesc(descriptor));
-}
-
-ConvertBf16ToFp32Layer* ConvertBf16ToFp32Layer::Clone(Graph& graph) const
-{
-    return CloneBase<ConvertBf16ToFp32Layer>(graph, GetName());
-}
-
-void ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs()
-{
-    VerifyLayerConnections(1, CHECK_LOCATION());
-
-    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
-
-    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
-
-    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
-
-    ARMNN_ASSERT(inferredShapes.size() == 1);
-
-    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConvertBf16ToFp32Layer");
-}
-
-void ConvertBf16ToFp32Layer::ExecuteStrategy(IStrategy& strategy) const
-{
-    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
-}
-
-} // namespace armnn
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
deleted file mode 100644
index 7131275..0000000
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
+++ /dev/null
@@ -1,42 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <Layer.hpp>
-
-namespace armnn
-{
-
-/// This layer converts data type BFloat16 to Float32.
-class ConvertBf16ToFp32Layer : public Layer
-{
-public:
-    /// Makes a workload for the ConvertBf16ToFp32 type.
-    /// @param [in] factory The workload factory which will create the workload.
-    /// @return A pointer to the created workload, or nullptr if not created.
-    virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
-
-    /// Creates a dynamically-allocated copy of this layer.
-    /// @param [in] graph The graph into which this layer is being cloned.
-    ConvertBf16ToFp32Layer* Clone(Graph& graph) const override;
-
-    /// Check if the input tensor shape(s)
-    /// will lead to a valid configuration of @ref ConvertBf16ToFp32Layer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs() override;
-
-    void ExecuteStrategy(IStrategy& strategy) const override;
-
-protected:
-    /// Constructor to create a ConvertBf16ToFp32Layer.
-    /// @param [in] name Optional name for the layer.
-    ConvertBf16ToFp32Layer(const char* name);
-
-    /// Default destructor
-    ~ConvertBf16ToFp32Layer() = default;
-};
-
-} // namespace
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
deleted file mode 100644
index 7c98eea..0000000
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ConvertFp32ToBf16Layer.hpp"
-#include "LayerCloneBase.hpp"
-
-#include <armnn/TypesUtils.hpp>
-
-#include <armnn/backends/WorkloadData.hpp>
-#include <armnn/backends/WorkloadFactory.hpp>
-
-namespace armnn
-{
-
-ConvertFp32ToBf16Layer::ConvertFp32ToBf16Layer(const char* name)
-    : Layer(1, 1, LayerType::ConvertFp32ToBf16, name)
-{
-}
-
-std::unique_ptr<IWorkload> ConvertFp32ToBf16Layer::CreateWorkload(const IWorkloadFactory& factory) const
-{
-    ConvertFp32ToBf16QueueDescriptor descriptor;
-    SetAdditionalInfo(descriptor);
-
-    return factory.CreateWorkload(LayerType::ConvertFp32ToBf16, descriptor, PrepInfoAndDesc(descriptor));
-}
-
-ConvertFp32ToBf16Layer* ConvertFp32ToBf16Layer::Clone(Graph& graph) const
-{
-    return CloneBase<ConvertFp32ToBf16Layer>(graph, GetName());
-}
-
-void ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs()
-{
-
-    VerifyLayerConnections(1, CHECK_LOCATION());
-
-    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
-
-    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
-
-    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
-
-    ARMNN_ASSERT(inferredShapes.size() == 1);
-
-    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LayerName");
-}
-
-void ConvertFp32ToBf16Layer::ExecuteStrategy(IStrategy& strategy) const
-{
-    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
-}
-
-} // namespace armnn
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.hpp b/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
deleted file mode 100644
index 71de4fb..0000000
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
+++ /dev/null
@@ -1,42 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <Layer.hpp>
-
-namespace armnn
-{
-
-/// This layer converts data type Float32 to BFloat16.
-class ConvertFp32ToBf16Layer : public Layer
-{
-public:
-    /// Makes a workload for the ConvertFp32ToBf16Layer type.
-    /// @param [in] factory The workload factory which will create the workload.
-    /// @return A pointer to the created workload, or nullptr if not created.
-    virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
-
-    /// Creates a dynamically-allocated copy of this layer.
-    /// @param [in] graph The graph into which this layer is being cloned.
-    ConvertFp32ToBf16Layer* Clone(Graph& graph) const override;
-
-    /// Check if the input tensor shape(s)
-    /// will lead to a valid configuration of @ref ConvertFp32ToBf16Layer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs() override;
-
-    void ExecuteStrategy(IStrategy& strategy) const override;
-
-protected:
-    /// Constructor to create a ConvertFp32ToBf16Layer.
-    /// @param [in] name Optional name for the layer.
-    ConvertFp32ToBf16Layer(const char* name);
-
-    /// Default destructor
-    ~ConvertFp32ToBf16Layer() = default;
-};
-
-} // namespace
diff --git a/src/armnn/optimizations/All.hpp b/src/armnn/optimizations/All.hpp
index 0421f31..a11dec9 100644
--- a/src/armnn/optimizations/All.hpp
+++ b/src/armnn/optimizations/All.hpp
@@ -9,8 +9,6 @@
 #include "ConvertConstants.hpp"
 #include "ConvertConstDequantisationLayersToConstLayers.hpp"
 #include "ConvertConstPermuteLayersToConstLayers.hpp"
-#include "FuseConvertFp32ToBf16IntoConstLayers.hpp"
-#include "ConvertFp32NetworkToBf16.hpp"
 #include "ConvertFp32NetworkToFp16.hpp"
 #include "FoldPadIntoLayer2d.hpp"
 #include "FuseBatchNorm.hpp"
diff --git a/src/armnn/optimizations/ConvertConstants.hpp b/src/armnn/optimizations/ConvertConstants.hpp
index 54c14e5..7b2f1fd 100644
--- a/src/armnn/optimizations/ConvertConstants.hpp
+++ b/src/armnn/optimizations/ConvertConstants.hpp
@@ -11,7 +11,6 @@
 #include <armnn/backends/TensorHandle.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <BFloat16.hpp>
 #include <Half.hpp>
 
 namespace armnn
@@ -19,27 +18,6 @@
 namespace optimizations
 {
 
-struct BFloat16ToFloat32
-{
-    static void Func(std::shared_ptr<ConstTensorHandle>& handle)
-    {
-        const TensorInfo& info = handle->GetTensorInfo();
-
-        if (info.GetDataType() == DataType::BFloat16)
-        {
-            std::vector<float> newValues(info.GetNumElements());
-
-            armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(handle->GetConstTensor<BFloat16>(),
-                                                                         info.GetNumElements(),
-                                                                         newValues.data());
-
-            TensorInfo newInfo(info.GetShape(), DataType::Float32, 0.0f, 0, true);
-            ConstTensor newInput(newInfo, newValues);
-            handle.reset(new ScopedTensorHandle(newInput));
-        }
-    }
-};
-
 struct Float16ToFloat32
 {
     static void Func(std::shared_ptr<ConstTensorHandle>& handle)
@@ -61,27 +39,6 @@
     }
 };
 
-struct Float32ToBFloat16
-{
-    static void Func(std::shared_ptr<ConstTensorHandle>& handle)
-    {
-        const TensorInfo& info = handle->GetTensorInfo();
-
-        if (info.GetDataType() == DataType::Float32)
-        {
-            std::vector<BFloat16> newValues(info.GetNumElements());
-
-            armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(handle->GetConstTensor<float>(),
-                                                                         info.GetNumElements(),
-                                                                         newValues.data());
-
-            TensorInfo newInfo(info.GetShape(), DataType::BFloat16, 0.0f, 0, true);
-            ConstTensor newInput(newInfo, newValues);
-            handle.reset(new ScopedTensorHandle(newInput));
-        }
-    }
-};
-
 struct Float32ToFloat16
 {
     static void Func(std::shared_ptr<ConstTensorHandle>& handle)
@@ -138,17 +95,6 @@
     }
 };
 
-struct IsBFloat16Layer
-{
-    static bool Test(const Layer& layer)
-    {
-        return layer.GetDataType() == DataType::BFloat16;
-    }
-};
-
-using ConvertConstantsBFloatToFloat = ConvertConstants<BFloat16ToFloat32, IsFloat32Layer>;
-using ConvertConstantsFloatToBFloat = ConvertConstants<Float32ToBFloat16, IsBFloat16Layer>;
-
 using ConvertConstantsHalfToFloat = ConvertConstants<Float16ToFloat32, IsFloat32Layer>;
 using ConvertConstantsFloatToHalf = ConvertConstants<Float32ToFloat16, IsFloat16Layer>;
 
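
The deleted Float32ToBFloat16 and BFloat16ToFloat32 functors followed the same shape as the surviving Half converters: check the constant's data type, re-encode the payload element by element, then swap the tensor handle for one holding the new type. A minimal sketch of that guard-and-reencode step with standard containers; the truncating cast here is only an approximation, since the real helper rounded to nearest even:

#include <cstdint>
#include <cstring>
#include <vector>

enum class ToyDataType { Float32, BFloat16 };

struct ToyConstant
{
    ToyDataType           dataType;
    std::vector<float>    fp32Values;
    std::vector<uint16_t> bf16Values;
};

void ReencodeFloat32ToBf16(ToyConstant& constant)
{
    if (constant.dataType != ToyDataType::Float32)   // guard: only convert Float32 payloads
    {
        return;
    }
    constant.bf16Values.clear();
    constant.bf16Values.reserve(constant.fp32Values.size());
    for (float value : constant.fp32Values)
    {
        uint32_t bits;
        std::memcpy(&bits, &value, sizeof(bits));
        constant.bf16Values.push_back(static_cast<uint16_t>(bits >> 16)); // truncating sketch
    }
    constant.fp32Values.clear();
    constant.dataType = ToyDataType::BFloat16;       // mirrors resetting the handle with new TensorInfo
}
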
diff --git a/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp b/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp
deleted file mode 100644
index 6c80e74..0000000
--- a/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp
+++ /dev/null
@@ -1,79 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-
-#include "NetworkUtils.hpp"
-#include "Optimization.hpp"
-
-#include <armnn/utility/PolymorphicDowncast.hpp>
-
-namespace armnn
-{
-namespace optimizations
-{
-
-template <typename LayerT>
-inline LayerT* ConvertWeight(Layer* l)
-{
-    LayerT* layer = PolymorphicDowncast<LayerT*>(l);
-    if ((layer->GetType() == LayerType::Convolution2d || layer->GetType() == LayerType::FullyConnected)
-         && layer->m_Weight)
-    {
-        const TensorInfo& info = layer->m_Weight->GetTensorInfo();
-
-        if (info.GetDataType() == DataType::Float32)
-        {
-            std::vector<BFloat16> newValues(info.GetNumElements());
-
-            armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(
-                    layer->m_Weight->template GetConstTensor<float>(),
-                    info.GetNumElements(),
-                    newValues.data());
-
-            TensorInfo newInfo(info);
-            newInfo.SetDataType(DataType::BFloat16);
-            ConstTensor newInput(newInfo, newValues);
-            layer->m_Weight.reset(new ScopedTensorHandle(newInput));
-        }
-    }
-    return layer;
-}
-
-class ConvertFp32NetworkToBf16Impl
-{
-public:
-
-    void Run(Graph& graph, Layer& layer) const
-    {
-        // Only convert Float32 To BFloat16 for the Input of Convolution2d layer and FullyConnected layer.
-        // And also convert weight data type from Float32 to Bfloat16.
-        // Do not convert bias data type.
-        if (layer.GetType() == LayerType::Convolution2d)
-        {
-            if (layer.GetDataType() == DataType::Float32)
-            {
-                InsertConvertFp32ToBf16LayersBefore(graph,layer);
-                ConvertWeight<Convolution2dLayer>(&layer);
-            }
-        }
-        else if (layer.GetType() == LayerType::FullyConnected)
-        {
-            if (layer.GetDataType() == DataType::Float32)
-            {
-                InsertConvertFp32ToBf16LayersBefore(graph,layer);
-                ConvertWeight<FullyConnectedLayer>(&layer);
-            }
-        }
-    }
-
-protected:
-    ConvertFp32NetworkToBf16Impl() = default;
-    ~ConvertFp32NetworkToBf16Impl() = default;
-};
-
-using Fp32NetworkToBf16Converter = OptimizeForType<Layer, ConvertFp32NetworkToBf16Impl>;
-
-} // namespace optimizations
-} // namespace armnn
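
The deleted pass was deliberately narrow: it only touched Float32 Convolution2d and FullyConnected layers, inserting a conversion in front of them and re-encoding the weights, while bias data stayed in Float32. A minimal sketch of that eligibility check, using a toy enum rather than armnn::LayerType:

enum class ToyLayerKind { Convolution2d, FullyConnected, Floor, Other };

// Only Float32 Convolution2d and FullyConnected layers were converted;
// bias tensors were left in Float32 throughout.
bool ShouldInsertBf16Conversion(ToyLayerKind kind, bool layerIsFloat32)
{
    const bool eligible = (kind == ToyLayerKind::Convolution2d ||
                           kind == ToyLayerKind::FullyConnected);
    return eligible && layerIsFloat32;
}
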
diff --git a/src/armnn/optimizations/FuseConvertFp32ToBf16IntoConstLayers.hpp b/src/armnn/optimizations/FuseConvertFp32ToBf16IntoConstLayers.hpp
deleted file mode 100644
index d112010..0000000
--- a/src/armnn/optimizations/FuseConvertFp32ToBf16IntoConstLayers.hpp
+++ /dev/null
@@ -1,89 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "Optimization.hpp"
-#include <armnnUtils/Permute.hpp>
-#include <ResolveType.hpp>
-
-namespace armnn
-{
-namespace optimizations
-{
-
-class FuseConvertFp32ToBf16IntoConstLayers
-{
-public:
-    void Run(Graph& graph, InputSlot& connection) const
-    {
-        Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
-        Layer& child = connection.GetOwningLayer();
-
-        ARMNN_ASSERT(base.GetType() == LayerType::Constant);
-        ARMNN_ASSERT(child.GetType() == LayerType::ConvertFp32ToBf16);
-
-        auto dataType = base.GetDataType();
-        switch (dataType)
-        {
-            case DataType::Float32:
-                ReplaceConvertFp32ToBf16Layer<DataType::BFloat16>(
-                        graph,
-                        PolymorphicDowncast<ConstantLayer*>(&base),
-                        PolymorphicDowncast<ConvertFp32ToBf16Layer*>(&child));
-                break;
-            default:
-                throw InvalidArgumentException(GetDataTypeName(dataType) +
-                                               std::string(" Constant Layer cannot be fused into ")  +
-                                               GetDataTypeName(child.GetDataType()) +
-                                               std::string(" conversion layer."));
-        }
-    }
-protected:
-    FuseConvertFp32ToBf16IntoConstLayers()  = default;
-    ~FuseConvertFp32ToBf16IntoConstLayers() = default;
-private:
-    template<armnn::DataType ArmnnType,
-             typename T = armnn::ResolveType<ArmnnType>>
-    static void ReplaceConvertFp32ToBf16Layer(Graph& graph,
-                                              ConstantLayer* constantLayer,
-                                              ConvertFp32ToBf16Layer* convertFp32ToBf16layer)
-    {
-        IgnoreUnused(graph);
-        /**
-         * This optimisation is to find situations where a constant set of inputs is being provided to a
-         * ConvertFp32ToBf16 layer. In this case we don't want the overhead of Converting the values on
-         * every inference, instead we want to Convert them once and store them in a Const layer to be
-         * used everytime as they will not change.
-         */
-        TensorInfo outputConvertFp32ToBf16Info = convertFp32ToBf16layer->GetOutputSlot(0).GetTensorInfo();
-        std::vector<T> newValues(outputConvertFp32ToBf16Info.GetNumElements());
-
-        armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(
-                constantLayer->m_LayerOutput->GetConstTensor<float>(),
-                outputConvertFp32ToBf16Info.GetNumElements(),
-                newValues.data());
-        TensorInfo newInfo = outputConvertFp32ToBf16Info;
-        newInfo.SetConstant(true);
-        ConstTensor newInput(newInfo, newValues);
-
-        constantLayer->m_LayerOutput.reset(new ScopedTensorHandle(newInput));
-
-        // Moves connections in convertFp32ToBf16layer output slot to the constant layer.
-        // ConvertFp32ToBf16layer layer will be removed if left unconnected.
-        convertFp32ToBf16layer->GetOutputSlot().MoveAllConnections(constantLayer->GetOutputSlot());
-
-        // Updating the output tensor
-        constantLayer->GetOutputSlot(0).SetTensorInfo(newInfo);
-        ARMNN_ASSERT(constantLayer->GetOutputSlot(0).GetTensorInfo().IsConstant() == true);
-    }
-};
-
-using FuseConversionLayersIntoConstLayers = OptimizeForConnection<ConstantLayer,
-                                                                  ConvertFp32ToBf16Layer,
-                                                                  FuseConvertFp32ToBf16IntoConstLayers>;
-
-} // namespace optimizations
-} // namespace armnn
\ No newline at end of file
diff --git a/src/armnn/test/FloatingPointConverterTest.cpp b/src/armnn/test/FloatingPointConverterTest.cpp
index 21a16a3..81384ce 100644
--- a/src/armnn/test/FloatingPointConverterTest.cpp
+++ b/src/armnn/test/FloatingPointConverterTest.cpp
@@ -5,7 +5,6 @@
 
 #include <armnnUtils/FloatingPointConverter.hpp>
 
-#include <BFloat16.hpp>
 #include <Half.hpp>
 
 #include <vector>
@@ -55,73 +54,4 @@
     }
 }
 
-TEST_CASE("TestConvertFloat32ToBFloat16")
-{
-    float floatArray[] = { 1.704735E38f,   // 0x7F004000 round down
-                           0.0f,           // 0x00000000 round down
-                           2.2959E-41f,    // 0x00004000 round down
-                           1.7180272E38f,  // 0x7F014000 round down
-                           9.18355E-41f,   // 0x00010000 round down
-                           1.14794E-40f,   // 0x00014000 round down
-                           4.5918E-41f,    // 0x00008000 round down
-                           -1.708058E38f,  // 0xFF008000 round down
-                           -4.3033756E37f, // 0xFE018000 round up
-                           1.60712E-40f,   // 0x0001C000 round up
-                           -2.0234377f,    // 0xC0018001 round up
-                           -1.1800863E-38f,// 0x80808001 round up
-                           4.843037E-35f,  // 0x0680C000 round up
-                           3.9999998f,     // 0x407FFFFF round up
-                           std::numeric_limits<float>::max(),    // 0x7F7FFFFF max positive value
-                           std::numeric_limits<float>::lowest(), // 0xFF7FFFFF max negative value
-                           1.1754942E-38f, // 0x007FFFFF min positive value
-                           -1.1754942E-38f // 0x807FFFFF min negative value
-                          };
-    uint16_t expectedResult[] = { 0x7F00,
-                                  0x0000,
-                                  0x0000,
-                                  0x7F01,
-                                  0x0001,
-                                  0x0001,
-                                  0x0000,
-                                  0xFF00,
-                                  0xFE02,
-                                  0x0002,
-                                  0xC002,
-                                  0x8081,
-                                  0x0681,
-                                  0x4080,
-                                  0x7F80,
-                                  0xFF80,
-                                  0x0080,
-                                  0x8080
-                                 };
-    size_t numFloats = sizeof(floatArray) / sizeof(floatArray[0]);
-
-    std::vector<armnn::BFloat16> convertedBuffer(numFloats);
-
-    armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(floatArray, numFloats, convertedBuffer.data());
-
-    for (size_t i = 0; i < numFloats; i++)
-    {
-        armnn::BFloat16 actual = convertedBuffer[i];
-        CHECK_EQ(expectedResult[i], actual.Val());
-    }
-}
-
-TEST_CASE("TestConvertBFloat16ToFloat32")
-{
-    uint16_t bf16Array[] = { 16256, 16320, 38699, 16384, 49156, 32639 };
-    size_t numFloats = sizeof(bf16Array) / sizeof(bf16Array[0]);
-    float expectedResult[] = { 1.0f, 1.5f, -5.525308E-25f, 2.0f, -2.0625f, 3.3895314E38f };
-    std::vector<float> convertedBuffer(numFloats, 0.0f);
-
-    armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(bf16Array, numFloats, convertedBuffer.data());
-
-    for (size_t i = 0; i < numFloats; i++)
-    {
-        float actual = convertedBuffer[i];
-        CHECK_EQ(expectedResult[i], actual);
-    }
-}
-
 }
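
The expected values in the deleted TestConvertFloat32ToBFloat16 follow the usual round-to-nearest-even truncation of an IEEE-754 binary32 value to its top sixteen bits. A minimal, self-contained sketch of that rounding (special-case handling of NaN is ignored here), checked against two of the vectors above:

#include <cassert>
#include <cstdint>
#include <cstring>

uint16_t Fp32ToBf16RoundNearestEven(float value)
{
    uint32_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    const uint32_t lsb      = (bits >> 16) & 1u;   // last bit that survives truncation
    const uint32_t rounding = 0x7FFFu + lsb;       // round to nearest, ties to even
    return static_cast<uint16_t>((bits + rounding) >> 16);
}

int main()
{
    assert(Fp32ToBf16RoundNearestEven(1.704735E38f) == 0x7F00);  // 0x7F004000 rounds down
    assert(Fp32ToBf16RoundNearestEven(-2.0234377f)  == 0xC002);  // 0xC0018001 rounds up
    return 0;
}
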
diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp
index a3800ad..1035a3b 100644
--- a/src/armnn/test/ShapeInferenceTests.cpp
+++ b/src/armnn/test/ShapeInferenceTests.cpp
@@ -250,17 +250,6 @@
     CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == outputShape);
 }
 
-TEST_CASE("ConvertBf16ToFp32Test")
-{
-    CreateGraphAndRunTest<ConvertBf16ToFp32Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
-}
-
-TEST_CASE("ConvertFp16ToBf16Test")
-{
-    const TensorShape tensorShape{5, 7, 6, 2};
-    CreateGraphAndRunTest<ConvertFp32ToBf16Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
-}
-
 TEST_CASE("ConvertFp16ToFp32Test")
 {
     CreateGraphAndRunTest<ConvertFp16ToFp32Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
diff --git a/src/armnn/test/UtilsTests.cpp b/src/armnn/test/UtilsTests.cpp
index 6388437..067c861 100644
--- a/src/armnn/test/UtilsTests.cpp
+++ b/src/armnn/test/UtilsTests.cpp
@@ -123,54 +123,6 @@
     CHECK((GetDataTypeName(armnn::DataType::BFloat16) == std::string("BFloat16")));
 }
 
-TEST_CASE("Float32ToBFloat16Test")
-{
-    // LSB = 0, R = 0 -> round down
-    armnn::BFloat16 roundDown0 = armnn::BFloat16::Float32ToBFloat16(1.704735E38f); // 0x7F004000
-    CHECK_EQ(roundDown0.Val(), 0x7F00);
-    // LSB = 1, R = 0 -> round down
-    armnn::BFloat16 roundDown1 = armnn::BFloat16::Float32ToBFloat16(9.18355E-41f); // 0x00010000
-    CHECK_EQ(roundDown1.Val(), 0x0001);
-    // LSB = 0, R = 1 all 0 -> round down
-    armnn::BFloat16 roundDown2 = armnn::BFloat16::Float32ToBFloat16(1.14794E-40f); // 0x00014000
-    CHECK_EQ(roundDown2.Val(), 0x0001);
-    // LSB = 1, R = 1 -> round up
-    armnn::BFloat16 roundUp = armnn::BFloat16::Float32ToBFloat16(-2.0234377f); // 0xC0018001
-    CHECK_EQ(roundUp.Val(), 0xC002);
-    // LSB = 0, R = 1 -> round up
-    armnn::BFloat16 roundUp1 = armnn::BFloat16::Float32ToBFloat16(4.843037E-35f); // 0x0680C000
-    CHECK_EQ(roundUp1.Val(), 0x0681);
-    // Max positive value -> infinity
-    armnn::BFloat16 maxPositive = armnn::BFloat16::Float32ToBFloat16(std::numeric_limits<float>::max()); // 0x7F7FFFFF
-    CHECK_EQ(maxPositive, armnn::BFloat16::Inf());
-    // Max negative value -> -infinity
-    armnn::BFloat16 maxNeg = armnn::BFloat16::Float32ToBFloat16(std::numeric_limits<float>::lowest()); // 0xFF7FFFFF
-    CHECK_EQ(maxNeg.Val(), 0xFF80);
-    // Min positive value
-    armnn::BFloat16 minPositive = armnn::BFloat16::Float32ToBFloat16(1.1754942E-38f); // 0x007FFFFF
-    CHECK_EQ(minPositive.Val(), 0x0080);
-    // Min negative value
-    armnn::BFloat16 minNeg = armnn::BFloat16::Float32ToBFloat16(-1.1754942E-38f); // 0x807FFFFF
-    CHECK_EQ(minNeg.Val(), 0x8080);
-}
-
-TEST_CASE("BFloat16ToFloat32Test")
-{
-    armnn::BFloat16 bf0(1.5f);
-    CHECK_EQ(bf0.ToFloat32(), 1.5f);
-    armnn::BFloat16 bf1(-5.525308E-25f);
-    CHECK_EQ(bf1.ToFloat32(), -5.525308E-25f);
-    armnn::BFloat16 bf2(-2.0625f);
-    CHECK_EQ(bf2.ToFloat32(), -2.0625f);
-    uint16_t v = 32639;
-    armnn::BFloat16 bf3(v);
-    CHECK_EQ(bf3.ToFloat32(), 3.3895314E38f);
-    // Infinity
-    CHECK_EQ(armnn::BFloat16::Inf().ToFloat32(), std::numeric_limits<float>::infinity());
-    // NaN
-    CHECK(std::isnan(armnn::BFloat16::Nan().ToFloat32()));
-}
-
 TEST_CASE("GraphTopologicalSortSimpleTest")
 {
     std::map<int, std::vector<int>> graph;
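
The deleted BFloat16ToFloat32Test exercises the widening direction, which is exact: a bfloat16 value is the upper half of a binary32, so placing its sixteen bits in the high half reproduces the original float. A minimal sketch, checked against two of the removed expectations:

#include <cassert>
#include <cstdint>
#include <cstring>

float Bf16ToFp32(uint16_t value)
{
    const uint32_t bits = static_cast<uint32_t>(value) << 16;  // widen into the high half
    float result;
    std::memcpy(&result, &bits, sizeof(result));
    return result;
}

int main()
{
    assert(Bf16ToFp32(16320) == 1.5f);           // 0x3FC0 -> 0x3FC00000
    assert(Bf16ToFp32(32639) == 3.3895314E38f);  // 0x7F7F -> 0x7F7F0000
    return 0;
}
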
diff --git a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
deleted file mode 100644
index 4aacf7f..0000000
--- a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
+++ /dev/null
@@ -1,128 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <TestUtils.hpp>
-
-#include <BFloat16.hpp>
-#include <Optimizer.hpp>
-
-#include <doctest/doctest.h>
-
-using namespace armnn;
-
-TEST_SUITE("Optimizer")
-{
-using namespace armnn::optimizations;
-
-TEST_CASE("ConvertConstantsFloatToBFloatTest")
-{
-    armnn::Graph graph;
-
-    const armnn::TensorInfo info({ 1, 1, 1, 2 }, armnn::DataType::BFloat16);
-
-    // Create const tensor from fp32 data
-    unsigned int dims[] = { 4, 2, 1, 1 };
-    std::vector<float> floatWeights{ 0.0f, -1.0f,
-                                     3.8f, // 0x40733333 Round down
-                                     3.1055E+29f, // 0x707ADC3C Round up
-                                     9.149516E-10f, // 0x307B7FFF Round down
-                                    -3.8f, // 0xC0733333 Round down
-                                    -3.1055E+29f, // 0xF07ADC3C Round up
-                                    -9.149516E-10f // 0xB07B7FFF Round down
-                                   };
-    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
-
-    // Create simple test network
-    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
-    input->GetOutputSlot().SetTensorInfo(info);
-
-    auto fc      = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
-    fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
-    fc->GetOutputSlot().SetTensorInfo(info);
-
-    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
-    // Connect up the layers
-    input->GetOutputSlot().Connect(fc->GetInputSlot(0));
-    fc->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    // Check tensor data type before conversion
-    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
-
-    // Run the optimizer
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToBFloat()));
-
-    // Check tensor data type after conversion
-    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);
-
-    // Check whether data matches expected Bf16 data
-    const BFloat16* data = fc->m_Weight->GetConstTensor<BFloat16>();
-    CHECK(data[0] == BFloat16(0.0f));
-    CHECK(data[1] == BFloat16(-1.0f));
-    CHECK(data[2] == BFloat16(3.796875f)); // 0x4073
-    CHECK(data[3] == BFloat16(3.1072295E29f)); // 0x707B
-    CHECK(data[4] == BFloat16(9.131327E-10f)); // 0x307B
-    CHECK(data[5] == BFloat16(-3.796875f)); // 0xC073
-    CHECK(data[6] == BFloat16(-3.1072295E29f)); // 0xF07B
-    CHECK(data[7] == BFloat16(-9.131327E-10f)); // 0xB07B
-}
-
-TEST_CASE("ConvertConstantsBFloatToFloatTest")
-{
-    armnn::Graph graph;
-
-    const armnn::TensorInfo info({ 1, 1, 1, 2 }, armnn::DataType::Float32);
-
-    // Create the BFloat16 precision input data
-    unsigned int dims[] = { 4, 2, 1, 1 };
-    std::vector<float> convWeightsData{ 0.f, -1.f,
-                                        3.796875f, // 0x4073
-                                        3.1072295E29f, // 0x707B
-                                        9.131327E-10f, // 0x307B
-                                       -3.796875f, // 0xC073
-                                       -3.1072295E29f, // 0xF07B
-                                       -9.131327E-10f // 0xB07B
-                                       };
-    std::vector<uint16_t> bfWeights(8);
-    armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(convWeightsData.data(), convWeightsData.size(),
-                                                                 bfWeights.data());
-    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::BFloat16, 0.0f, 0, true), bfWeights);
-
-    //Create the simple test network
-    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
-    input->GetOutputSlot().SetTensorInfo(info);
-
-    auto fc      = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
-    fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
-    fc->GetOutputSlot().SetTensorInfo(info);
-
-    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
-    //Connect up the layers
-    input->GetOutputSlot().Connect(fc->GetInputSlot(0));
-    fc->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    //Test the tensor info is correct.
-    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);
-
-    // Run the optimizer
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsBFloatToFloat()));
-
-    //Test the tensor info is correct.
-    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
-
-    // Now test the data matches float32 data
-    const float* data = fc->m_Weight->GetConstTensor<float>();
-    CHECK(data[0] == 0.0f);
-    CHECK(data[1] == -1.0f);
-    CHECK(data[2] == 3.796875f);
-    CHECK(data[3] == 3.1072295E29f);
-    CHECK(data[4] == 9.131327E-10f);
-    CHECK(data[5] == -3.796875f);
-    CHECK(data[6] == -3.1072295E29f);
-    CHECK(data[7] == -9.131327E-10f);
-}
-
-}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
deleted file mode 100644
index 66893ce..0000000
--- a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
+++ /dev/null
@@ -1,229 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <TestUtils.hpp>
-
-#include <Optimizer.hpp>
-
-#include <doctest/doctest.h>
-
-TEST_SUITE("Optimizer")
-{
-using namespace armnn::optimizations;
-
-TEST_CASE("Fp32NetworkToBf16OptimizationNoConversionTest")
-{
-    armnn::Graph graph;
-
-    const armnn::TensorInfo infoFP32({ 2, 2, 1, 3 }, armnn::DataType::Float32);
-
-    // Create the simple test network without Conv2D/FullyConnected.
-    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
-    input->GetOutputSlot().SetTensorInfo(infoFP32);
-
-    auto floor = graph.AddLayer<armnn::FloorLayer>("floor");
-    floor->GetOutputSlot().SetTensorInfo(infoFP32);
-
-    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
-    // Connect up the layers
-    input->GetOutputSlot().Connect(floor->GetInputSlot(0));
-    floor->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
-
-    // Run the optimizer
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));
-
-    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::FloorLayer>,
-                             &IsLayerOfType<armnn::OutputLayer>));
-}
-
-TEST_CASE("Fp32NetworkToBf16OptimizationConv2DTest")
-{
-    armnn::Graph graph;
-
-    const armnn::TensorInfo infoFP32({ 2, 3, 8, 1 }, armnn::DataType::Float32);
-
-    // Create const tensor fp32 data
-    unsigned int dims[] = { 4, 2, 1, 1 };
-    std::vector<float> floatWeights{ 0.0f, -1.0f,
-                                     3.8f, // 0x40733333 Round down
-                                     3.1055E+29f, // 0x707ADC3C Round up
-                                     9.149516E-10f, // 0x307B7FFF Round down
-                                    -3.8f, // 0xC0733333 Round down
-                                    -3.1055E+29f, // 0xF07ADC3C Round up
-                                    -9.149516E-10f // 0xB07B7FFF Round down
-                                   };
-    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
-
-    // Create const bias fp32 data
-    unsigned int biasDims[] {4};
-    std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
-    armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32, 0.0f, 0, true), floatBias);
-
-    // A network with Convolution2d layer
-    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
-    input->GetOutputSlot().SetTensorInfo(infoFP32);
-
-    armnn::Convolution2dDescriptor descriptor;
-    descriptor.m_BiasEnabled = true;
-    auto conv = graph.AddLayer<armnn::Convolution2dLayer>(descriptor, "conv2d");
-    conv->GetOutputSlot().SetTensorInfo(infoFP32);
-
-    auto weightsLayer = graph.AddLayer<armnn::ConstantLayer>("Weights");
-    weightsLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(weights);
-    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsLayer->m_LayerOutput->GetTensorInfo());
-
-    auto biasLayer = graph.AddLayer<armnn::ConstantLayer>("Bias");
-    biasLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(bias);
-    biasLayer->GetOutputSlot(0).SetTensorInfo(biasLayer->m_LayerOutput->GetTensorInfo());
-
-    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
-    // Connect up the layers
-    input->GetOutputSlot().Connect(conv->GetInputSlot(0));
-    weightsLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(1));
-    biasLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(2));
-    conv->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
-                                                      &IsLayerOfType<armnn::ConstantLayer>,
-                                                      &IsLayerOfType<armnn::ConstantLayer>,
-                                                      &IsLayerOfType<armnn::Convolution2dLayer>,
-                                                      &IsLayerOfType<armnn::OutputLayer>));
-
-    // Run the optimizer
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(RedirectMembersToConstantInputs(),
-                                                           Fp32NetworkToBf16Converter()));
-
-    CHECK(7 == graph.GetNumLayers());
-    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
-                                                      &IsLayerOfType<armnn::ConstantLayer>,
-                                                      &IsLayerOfType<armnn::ConstantLayer>,
-                                                      &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
-                                                      &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
-                                                      &IsLayerOfType<armnn::Convolution2dLayer>,
-                                                      &IsLayerOfType<armnn::OutputLayer>));
-
-    armnn::TensorInfo inputTensor = conv->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
-    armnn::TensorInfo weightTensor = conv->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
-    armnn::TensorInfo biasTensor = conv->GetInputSlot(2).GetConnectedOutputSlot()->GetTensorInfo();
-    armnn::TensorInfo outputTensor = conv->GetOutputSlot(0).GetTensorInfo();
-    CHECK((conv->GetDataType() == armnn::DataType::BFloat16));
-    CHECK((conv->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
-    CHECK((conv->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
-    CHECK((inputTensor.GetDataType() == armnn::DataType::BFloat16));
-    CHECK((weightTensor.GetDataType() == armnn::DataType::BFloat16));
-    CHECK((biasTensor.GetDataType() == armnn::DataType::Float32));
-    CHECK((outputTensor.GetDataType() == armnn::DataType::Float32));
-
-    // Check whether data matches expected Bf16 data
-    const armnn::BFloat16* data = conv->m_Weight->GetConstTensor<armnn::BFloat16>();
-    CHECK(data[0] == armnn::BFloat16(0.0f));
-    CHECK(data[1] == armnn::BFloat16(-1.0f));
-    CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
-    CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
-    CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
-    CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
-    CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
-    CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
-}
-
-TEST_CASE("Fp32NetworkToBf16OptimizationFullyConnectedTest")
-{
-    armnn::Graph graph;
-
-    const armnn::TensorInfo infoFP32({ 2, 3, 8, 1 }, armnn::DataType::Float32);
-
-    // Create const tensor fp32 data
-    unsigned int dims[] = { 4, 2, 1, 1 };
-    std::vector<float> floatWeights{ 0.0f, -1.0f,
-                                     3.8f, // 0x40733333 Round down
-                                     3.1055E+29f, // 0x707ADC3C Round up
-                                     9.149516E-10f, // 0x307B7FFF Round down
-                                    -3.8f, // 0xC0733333 Round down
-                                    -3.1055E+29f, // 0xF07ADC3C Round up
-                                    -9.149516E-10f // 0xB07B7FFF Round down
-                                   };
-    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
-
-    // Create const bias fp32 data
-    unsigned int biasDims[] {4};
-    std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
-    armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32, 0.0f, 0, true), floatBias);
-
-    // A network with FullyConnected layer
-    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
-    input->GetOutputSlot().SetTensorInfo(infoFP32);
-
-    armnn::FullyConnectedDescriptor descriptor;
-    descriptor.m_BiasEnabled = true;
-
-    auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(descriptor, "fully");
-    fc->GetOutputSlot().SetTensorInfo(infoFP32);
-
-    auto weightsLayer = graph.AddLayer<armnn::ConstantLayer>("Weights");
-    weightsLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(weights);
-    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsLayer->m_LayerOutput->GetTensorInfo());
-
-    auto biasLayer = graph.AddLayer<armnn::ConstantLayer>("Bias");
-    biasLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(bias);
-    biasLayer->GetOutputSlot(0).SetTensorInfo(biasLayer->m_LayerOutput->GetTensorInfo());
-
-    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
-    // Connect up the layers
-    input->GetOutputSlot().Connect(fc->GetInputSlot(0));
-    weightsLayer->GetOutputSlot(0).Connect(fc->GetInputSlot(1));
-    biasLayer->GetOutputSlot(0).Connect(fc->GetInputSlot(2));
-    fc->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
-                                                      &IsLayerOfType<armnn::ConstantLayer>,
-                                                      &IsLayerOfType<armnn::ConstantLayer>,
-                                                      &IsLayerOfType<armnn::FullyConnectedLayer>,
-                                                      &IsLayerOfType<armnn::OutputLayer>));
-
-    // Run the optimizer
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(RedirectMembersToConstantInputs(),
-                                                           Fp32NetworkToBf16Converter()));
-
-    CHECK(7 == graph.GetNumLayers());
-    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
-                                                      &IsLayerOfType<armnn::ConstantLayer>,
-                                                      &IsLayerOfType<armnn::ConstantLayer>,
-                                                      &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
-                                                      &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
-                                                      &IsLayerOfType<armnn::FullyConnectedLayer>,
-                                                      &IsLayerOfType<armnn::OutputLayer>));
-
-    armnn::TensorInfo inputTensor = fc->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
-    armnn::TensorInfo weightTensor = fc->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
-    armnn::TensorInfo biasTensor = fc->GetInputSlot(2).GetConnectedOutputSlot()->GetTensorInfo();
-    armnn::TensorInfo outputTensor = fc->GetOutputSlot(0).GetTensorInfo();
-    CHECK((fc->GetDataType() == armnn::DataType::BFloat16));
-    CHECK((fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
-    CHECK((fc->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
-    CHECK((inputTensor.GetDataType() == armnn::DataType::BFloat16));
-    CHECK((weightTensor.GetDataType() == armnn::DataType::BFloat16));
-    CHECK((biasTensor.GetDataType() == armnn::DataType::Float32));
-    CHECK((outputTensor.GetDataType() == armnn::DataType::Float32));
-
-    // Check whether data matches expected Bf16 data
-    const armnn::BFloat16* data = fc->m_Weight->GetConstTensor<armnn::BFloat16>();
-    CHECK(data[0] == armnn::BFloat16(0.0f));
-    CHECK(data[1] == armnn::BFloat16(-1.0f));
-    CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
-    CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
-    CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
-    CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
-    CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
-    CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
-}
-
-}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/FuseConvertF32BF16IntoConstLayerTests.cpp b/src/armnn/test/optimizations/FuseConvertF32BF16IntoConstLayerTests.cpp
deleted file mode 100644
index 93d5948..0000000
--- a/src/armnn/test/optimizations/FuseConvertF32BF16IntoConstLayerTests.cpp
+++ /dev/null
@@ -1,151 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <LayersFwd.hpp>
-#include <Network.hpp>
-#include <NetworkUtils.hpp>
-#include <Optimizer.hpp>
-#include <TestUtils.hpp>
-
-#include <armnn/backends/TensorHandle.hpp>
-
-#include <doctest/doctest.h>
-
-TEST_SUITE("Optimizer")
-{
-using namespace armnn;
-using namespace armnn::optimizations;
-
-TEST_CASE("FuseConvertFp32Fp16intoConst")
-{
-    Graph graph;
-    const unsigned int shape[] = {1, 2, 2, 3};
-
-    const TensorInfo constTensorInfo(4, shape, DataType::Float32, 1.0, 0, true);
-    const TensorInfo outputConvertInfo(4, shape, DataType::BFloat16, 1.0, 0, true);
-
-    ConstantLayer* constantLayer = graph.AddLayer<ConstantLayer>("constant");
-    std::vector<float> constantValues(constTensorInfo.GetNumElements(), 3.1416f);
-    ConstTensor constTensor(constTensorInfo, constantValues.data());
-    constantLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
-    constantLayer->GetOutputSlot().SetTensorInfo(constTensorInfo);
-
-    ConvertFp32ToBf16Layer* convertLayer = graph.AddLayer<ConvertFp32ToBf16Layer>("convert");
-    convertLayer->GetOutputSlot().SetTensorInfo(outputConvertInfo);
-
-    OutputLayer* output = graph.AddLayer<OutputLayer>(0, "output");
-
-    // Connect up constant -> convert -> output
-    constantLayer->GetOutputSlot().Connect(convertLayer->GetInputSlot(0));
-    convertLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    auto checkConstantFloat32 = [](const armnn::Layer *const layer) -> bool {
-        return IsLayerOfType<ConstantLayer>(layer) &&
-               (layer->GetDataType() == DataType::Float32);
-    };
-    auto checkConstantBFloat16 = [](const armnn::Layer *const layer) -> bool {
-        return IsLayerOfType<ConstantLayer>(layer) &&
-               (layer->GetDataType() == DataType::BFloat16);
-    };
-
-    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
-                        checkConstantFloat32,
-                        &IsLayerOfType<ConvertFp32ToBf16Layer>,
-                        &IsLayerOfType<OutputLayer>));
-
-    armnn::Optimizer::Pass(graph, MakeOptimizations(FuseConversionLayersIntoConstLayers()));
-
-    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
-                        checkConstantBFloat16,
-                        &IsLayerOfType<OutputLayer>));
-}
-
-TEST_CASE("RevertConstantWeightsToFP32")
-{
-    Graph graph;
-    const unsigned int shape[] = {1, 2, 2, 3};
-
-    const TensorInfo constTensorInfo(4, shape, DataType::Float32, 1.0, 0, true);
-    const TensorInfo outputConvertInfo(4, shape, DataType::BFloat16, 1.0, 0, true);
-
-    TensorInfo inputInfo(4, shape, DataType::Float32);
-    auto* input = graph.AddLayer<InputLayer>(0, "input0");
-    input->GetOutputSlot().SetTensorInfo(inputInfo);
-
-    auto* constantLayer = graph.AddLayer<ConstantLayer>("constant");
-    std::vector<float> constantValues(constTensorInfo.GetNumElements(), 3.1416f);
-    ConstTensor constTensor(constTensorInfo, constantValues.data());
-    constantLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
-    constantLayer->GetOutputSlot().SetTensorInfo(constTensorInfo);
-
-    ConvertFp32ToBf16Layer* convertLayerInputs = graph.AddLayer<ConvertFp32ToBf16Layer>("convert");
-    convertLayerInputs->GetOutputSlot().SetTensorInfo(outputConvertInfo);
-    ConvertFp32ToBf16Layer* convertLayerWeights = graph.AddLayer<ConvertFp32ToBf16Layer>("convert2");
-    convertLayerWeights->GetOutputSlot().SetTensorInfo(outputConvertInfo);
-    ConvertFp32ToBf16Layer* convertLayerBiases = graph.AddLayer<ConvertFp32ToBf16Layer>("convert3");
-    convertLayerBiases->GetOutputSlot().SetTensorInfo(outputConvertInfo);
-
-    auto* biases  = graph.AddLayer<armnn::ConstantLayer>("Biases");
-    biases->m_LayerOutput  = std::make_unique<armnn::ScopedTensorHandle>(constTensor);
-    biases->GetOutputSlot().SetTensorInfo(constTensorInfo);
-
-    armnn::Convolution2dDescriptor descriptor;
-    descriptor.m_BiasEnabled = true;
-    auto* conv = graph.AddLayer<armnn::Convolution2dLayer>(descriptor, "conv2d");
-    const armnn::TensorInfo infoFP32({ 2, 3, 8, 1 }, armnn::DataType::Float32);
-    conv->GetOutputSlot().SetTensorInfo(infoFP32);
-
-    auto* output = graph.AddLayer<OutputLayer>(0, "output");
-
-    // Connect up Input    -> Convert ->
-    //            Constant -> Convert -> Conv2d -> Output
-    //            Constant -> Convert ->
-    input->GetOutputSlot().Connect(convertLayerInputs->GetInputSlot(0));
-    constantLayer->GetOutputSlot().Connect(convertLayerWeights->GetInputSlot(0));
-    biases->GetOutputSlot().Connect(convertLayerBiases->GetInputSlot(0));
-
-    convertLayerInputs->GetOutputSlot().Connect(conv->GetInputSlot(0));
-    convertLayerWeights->GetOutputSlot().Connect(conv->GetInputSlot(1));
-    convertLayerBiases->GetOutputSlot().Connect(conv->GetInputSlot(2));
-
-    conv->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    auto checkConstantFloat32 = [](const armnn::Layer *const layer) -> bool {
-        return IsLayerOfType<ConstantLayer>(layer) &&
-               (layer->GetDataType() == DataType::Float32);
-    };
-    auto checkConstantBFloat16 = [](const armnn::Layer *const layer) -> bool {
-        return IsLayerOfType<ConstantLayer>(layer) &&
-               (layer->GetDataType() == DataType::BFloat16);
-    };
-
-    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
-                        &IsLayerOfType<InputLayer>,
-                        checkConstantFloat32,
-                        checkConstantFloat32,
-                        &IsLayerOfType<ConvertFp32ToBf16Layer>,
-                        &IsLayerOfType<ConvertFp32ToBf16Layer>,
-                        &IsLayerOfType<ConvertFp32ToBf16Layer>,
-                        &IsLayerOfType<Convolution2dLayer>,
-                        &IsLayerOfType<OutputLayer>));
-
-    armnn::Optimizer::Pass(graph, MakeOptimizations(FuseConversionLayersIntoConstLayers()));
-
-    bool revert = RevertConstantWeightsToFP32(conv);
-
-    // Erase unconnected layer as occurs during Topological Sort.
-    graph.EraseLayer(convertLayerInputs);
-
-    CHECK(revert);
-    CHECK(constantLayer->GetDataType() == DataType::Float32);
-
-    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
-                        &IsLayerOfType<InputLayer>,
-                        checkConstantBFloat16,
-                        checkConstantFloat32,
-                        &IsLayerOfType<Convolution2dLayer>,
-                        &IsLayerOfType<OutputLayer>));
-}
-}
diff --git a/src/armnnUtils/FloatingPointConverter.cpp b/src/armnnUtils/FloatingPointConverter.cpp
index 8123cf3..7a684f1 100644
--- a/src/armnnUtils/FloatingPointConverter.cpp
+++ b/src/armnnUtils/FloatingPointConverter.cpp
@@ -43,34 +43,4 @@
     }
 }
 
-void FloatingPointConverter::ConvertFloat32ToBFloat16(const float* srcFloat32Buffer,
-                                                      size_t numElements,
-                                                      void* dstBFloat16Buffer)
-{
-    ARMNN_ASSERT(srcFloat32Buffer != nullptr);
-    ARMNN_ASSERT(dstBFloat16Buffer != nullptr);
-
-    armnn::BFloat16* bf16 = static_cast<armnn::BFloat16*>(dstBFloat16Buffer);
-
-    for (size_t i = 0; i < numElements; i++)
-    {
-        bf16[i] = armnn::BFloat16(srcFloat32Buffer[i]);
-    }
-}
-
-void FloatingPointConverter::ConvertBFloat16ToFloat32(const void* srcBFloat16Buffer,
-                                                      size_t numElements,
-                                                      float* dstFloat32Buffer)
-{
-    ARMNN_ASSERT(srcBFloat16Buffer != nullptr);
-    ARMNN_ASSERT(dstFloat32Buffer != nullptr);
-
-    const armnn::BFloat16* bf16 = static_cast<const armnn::BFloat16*>(srcBFloat16Buffer);
-
-    for (size_t i = 0; i < numElements; i++)
-    {
-        dstFloat32Buffer[i] = bf16[i].ToFloat32();
-    }
-}
-
 } //namespace armnnUtils
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 0010379..26137f5 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -164,13 +164,6 @@
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsConvertBf16ToFp32Supported(const TensorInfo&, // input
-                                                    const TensorInfo&, // output
-                                                    Optional<std::string&> reasonIfUnsupported) const
-{
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
 bool LayerSupportBase::IsConvertFp16ToFp32Supported(const TensorInfo&, // input
                                                     const TensorInfo&, // output
                                                     Optional<std::string&> reasonIfUnsupported) const
@@ -178,14 +171,6 @@
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsConvertFp32ToBf16Supported(const TensorInfo&, // input
-                                                    const TensorInfo&, // output
-                                                    Optional<std::string&> reasonIfUnsupported) const
-{
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-
 bool LayerSupportBase::IsConvertFp32ToFp16Supported(const TensorInfo&, // input
                                                     const TensorInfo&, // output
                                                     Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index b18af35..acf24a2 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -83,19 +83,11 @@
     bool IsConstantSupported(const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    bool IsConvertBf16ToFp32Supported(const TensorInfo& input,
-                                      const TensorInfo& output,
-                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "23.08")
     bool IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    bool IsConvertFp32ToBf16Supported(const TensorInfo& input,
-                                      const TensorInfo& output,
-                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "23.08")
     bool IsConvertFp32ToFp16Supported(
             const TensorInfo& input,
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 753fe06..62dfc6a 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -2222,52 +2222,6 @@
     }
 }
 
-void ConvertBf16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
-{
-    const std::string descriptorName{"ConvertBf16ToFp32QueueDescriptor"};
-
-    ValidateNumInputs(workloadInfo,  descriptorName, 1);
-    ValidateNumOutputs(workloadInfo, descriptorName, 1);
-
-    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
-    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
-
-    if (inputTensorInfo.GetDataType() != DataType::BFloat16)
-    {
-        throw InvalidArgumentException(descriptorName + ": Input tensor type must be BFloat16.");
-    }
-
-    if (outputTensorInfo.GetDataType() != DataType::Float32)
-    {
-        throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
-    }
-
-    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
-}
-
-void ConvertFp32ToBf16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
-{
-    const std::string descriptorName{"ConvertFp32ToBf16QueueDescriptor"};
-
-    ValidateNumInputs(workloadInfo,  descriptorName, 1);
-    ValidateNumOutputs(workloadInfo, descriptorName, 1);
-
-    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
-    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
-
-    if (inputTensorInfo.GetDataType() != DataType::Float32)
-    {
-        throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
-    }
-
-    if (outputTensorInfo.GetDataType() != DataType::BFloat16)
-    {
-        throw InvalidArgumentException(descriptorName + ": Output tensor type must be BFloat16.");
-    }
-
-    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
-}
-
 void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
 {
     const std::string descriptorName{"ConvertFp32ToFp16QueueDescriptor"};
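
The two deleted Validate methods followed the standard queue-descriptor pattern that the remaining conversions still use: exactly one input and one output, a fixed source and destination data type, and matching shapes. A minimal sketch of that check with plain standard-library types, where ToyTensorInfo stands in for armnn::TensorInfo:

#include <stdexcept>
#include <string>
#include <vector>

enum class ToyDataType { Float32, Float16 };

struct ToyTensorInfo
{
    ToyDataType               dataType;
    std::vector<unsigned int> shape;
};

void ValidateConversion(const ToyTensorInfo& input, const ToyTensorInfo& output,
                        ToyDataType expectedIn, ToyDataType expectedOut,
                        const std::string& descriptorName)
{
    if (input.dataType != expectedIn)
    {
        throw std::invalid_argument(descriptorName + ": unexpected input tensor type.");
    }
    if (output.dataType != expectedOut)
    {
        throw std::invalid_argument(descriptorName + ": unexpected output tensor type.");
    }
    if (input.shape != output.shape)   // element-wise cast: shapes must match exactly
    {
        throw std::invalid_argument(descriptorName + ": input and output shapes must match.");
    }
}
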
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 665ab3f..1283f67 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -227,13 +227,6 @@
             result = layerSupportObject.IsConstantSupported(OverrideDataType(output, dataType), reason);
             break;
         }
-        case LayerType::ConvertBf16ToFp32:
-        {
-            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
-            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
-            result = layerSupportObject.IsConvertBf16ToFp32Supported(input, output, reason);
-            break;
-        }
         case LayerType::ConvertFp16ToFp32:
         {
             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
@@ -241,13 +234,6 @@
             result = layerSupportObject.IsConvertFp16ToFp32Supported(input, output, reason);
             break;
         }
-        case LayerType::ConvertFp32ToBf16:
-        {
-            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
-            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
-            result = layerSupportObject.IsConvertFp32ToBf16Supported(input, output, reason);
-            break;
-        }
         case LayerType::ConvertFp32ToFp16:
         {
             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
@@ -1630,24 +1616,12 @@
             auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
             return CreateConstant(*constantQueueDescriptor, info);
         }
-        case LayerType::ConvertBf16ToFp32 :
-        {
-            auto convertBf16ToFp32QueueDescriptor
-                    = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
-            return CreateConvertBf16ToFp32(*convertBf16ToFp32QueueDescriptor, info);
-        }
         case LayerType::ConvertFp16ToFp32:
         {
             auto convertFp16ToFp32QueueDescriptor
                     = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
             return CreateConvertFp16ToFp32(*convertFp16ToFp32QueueDescriptor, info);
         }
-        case LayerType::ConvertFp32ToBf16:
-        {
-            auto convertFp32ToBf16QueueDescriptor
-                    = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
-            return CreateConvertFp32ToBf16(*convertFp32ToBf16QueueDescriptor, info);
-        }
         case LayerType::ConvertFp32ToFp16:
         {
             auto convertFp32ToFp16QueueDescriptor
@@ -1992,24 +1966,12 @@
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& /*desc*/,
-                                                                     const WorkloadInfo& /*info*/) const
-{
-    return std::unique_ptr<IWorkload>();
-}
-
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& /*desc*/,
                                                                      const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& /*desc*/,
-                                                                     const WorkloadInfo& /*info*/) const
-{
-    return std::unique_ptr<IWorkload>();
-}
-
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& /*desc*/,
                                                                      const WorkloadInfo& /*info*/) const
 {
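
The deleted CreateConvertBf16ToFp32 and CreateConvertFp32ToBf16 overloads show the base-factory convention that still applies to the remaining creators: the default implementation returns an empty pointer, and a backend signals support by overriding the creator. A minimal sketch of that pattern, assuming toy interfaces rather than IWorkloadFactory:

#include <memory>

struct IToyWorkload
{
    virtual ~IToyWorkload() = default;
    virtual void Execute() const = 0;
};

struct ToyWorkloadFactory
{
    virtual ~ToyWorkloadFactory() = default;

    // Base default: unsupported, signalled by a null pointer rather than an exception.
    // Backends that implement the conversion override this creator.
    virtual std::unique_ptr<IToyWorkload> CreateConversionWorkload() const
    {
        return std::unique_ptr<IToyWorkload>();
    }
};
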
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 007cca5..3545331 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -55,9 +55,7 @@
     test/layerTests/ConstantTestImpl.cpp \
     test/layerTests/Conv2dTestImpl.cpp \
     test/layerTests/Conv3dTestImpl.cpp \
-    test/layerTests/ConvertBf16ToFp32TestImpl.cpp \
     test/layerTests/ConvertFp16ToFp32TestImpl.cpp \
-    test/layerTests/ConvertFp32ToBf16TestImpl.cpp \
     test/layerTests/ConvertFp32ToFp16TestImpl.cpp \
     test/layerTests/DebugTestImpl.cpp \
     test/layerTests/DepthToSpaceTestImpl.cpp \
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 5e28399..232226b 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -83,12 +83,8 @@
     layerTests/Conv2dTestImpl.hpp
     layerTests/Conv3dTestImpl.cpp
     layerTests/Conv3dTestImpl.hpp
-    layerTests/ConvertBf16ToFp32TestImpl.cpp
-    layerTests/ConvertBf16ToFp32TestImpl.hpp
     layerTests/ConvertFp16ToFp32TestImpl.cpp
     layerTests/ConvertFp16ToFp32TestImpl.hpp
-    layerTests/ConvertFp32ToBf16TestImpl.cpp
-    layerTests/ConvertFp32ToBf16TestImpl.hpp
     layerTests/ConvertFp32ToFp16TestImpl.cpp
     layerTests/ConvertFp32ToFp16TestImpl.hpp
     layerTests/DebugTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 5fdcd9c..18f11a5 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -630,12 +630,8 @@
 
 DECLARE_LAYER_POLICY_1_PARAM(Constant)
 
-DECLARE_LAYER_POLICY_1_PARAM(ConvertBf16ToFp32)
-
 DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)
 
-DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToBf16)
-
 DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)
 
 DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 25435b2..00bfea5 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -16,9 +16,7 @@
 #include <backendsCommon/test/layerTests/ChannelShuffleTestImpl.hpp>
 #include <backendsCommon/test/layerTests/ComparisonTestImpl.hpp>
 #include <backendsCommon/test/layerTests/ConcatTestImpl.hpp>
-#include <backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp>
 #include <backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.hpp>
-#include <backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp>
 #include <backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.hpp>
 #include <backendsCommon/test/layerTests/Conv2dTestImpl.hpp>
 #include <backendsCommon/test/layerTests/Conv3dTestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp
deleted file mode 100644
index 0dd8b59..0000000
--- a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp
+++ /dev/null
@@ -1,62 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ConvertBf16ToFp32TestImpl.hpp"
-
-#include <armnnTestUtils/TensorCopyUtils.hpp>
-#include <armnnTestUtils/WorkloadTestUtils.hpp>
-
-#include <armnnTestUtils/TensorHelpers.hpp>
-
-LayerTestResult<float, 4> ConvertBf16ToFp32Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::ITensorHandleFactory& tensorHandleFactory)
-{
-    IgnoreUnused(memoryManager);
-
-    const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::BFloat16);
-    const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
-
-    std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
-        {
-            -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
-            1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
-        },
-        1.0f, 0);
-
-    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
-    std::vector<float> expectedOutput =
-        {
-            -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
-            1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
-        };
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ConvertBf16ToFp32QueueDescriptor data;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::ConvertBf16ToFp32,
-                                                                                data,
-                                                                                info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle.get(), inputValues.data());
-
-    workload->Execute();
-
-    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
-
-    return LayerTestResult<float, 4>(actualOutput,
-                                     expectedOutput,
-                                     outputHandle->GetShape(),
-                                     outputTensorInfo.GetShape());
-}
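
The deleted test above expects BF16 -> FP32 conversion to reproduce the input values exactly. As a point of reference, a minimal sketch of that lossless widening (illustration only, not ArmNN's armnnUtils::FloatingPointConverter) is:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Sketch: the 16 BF16 bits become the upper half of the FP32 word; the low
    // 16 mantissa bits are zero-filled, so no precision is lost on the way up.
    static float Bf16BitsToFp32(uint16_t bf16Bits)
    {
        const uint32_t bits = static_cast<uint32_t>(bf16Bits) << 16;
        float value;
        std::memcpy(&value, &bits, sizeof(value));
        return value;
    }

    int main()
    {
        std::printf("0x4073 -> %f\n", Bf16BitsToFp32(0x4073)); // prints 3.796875
        return 0;
    }
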
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp
deleted file mode 100644
index bcb0d6f..0000000
--- a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp
+++ /dev/null
@@ -1,18 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnnTestUtils/LayerTestResult.hpp>
-
-#include <BFloat16.hpp>
-
-#include <armnn/backends/IBackendInternal.hpp>
-#include <armnn/backends/WorkloadFactory.hpp>
-
-LayerTestResult<float, 4> ConvertBf16ToFp32Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::ITensorHandleFactory& tensorHandleFactory);
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp
deleted file mode 100644
index 5ee8f1d..0000000
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp
+++ /dev/null
@@ -1,84 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ConvertFp32ToBf16TestImpl.hpp"
-
-#include <armnnTestUtils/TensorCopyUtils.hpp>
-#include <armnnTestUtils/WorkloadTestUtils.hpp>
-
-#include <armnnTestUtils/TensorHelpers.hpp>
-
-LayerTestResult<armnn::BFloat16, 4> ConvertFp32ToBf16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::ITensorHandleFactory& tensorHandleFactory)
-{
-    IgnoreUnused(memoryManager);
-
-    const armnn::TensorInfo inputTensorInfo({1, 2, 4, 3}, armnn::DataType::Float32);
-    const armnn::TensorInfo outputTensorInfo({1, 2, 4, 3}, armnn::DataType::BFloat16);
-
-    std::vector<float> input =
-        {
-          -37.5f, -15.2f, -8.76f,
-          -2.0f, -1.5f, -1.3f,
-          -0.5f, -0.4f, 0.0f,
-          1.0f, 0.4f, 0.5f,
-          1.3f, 1.5f, 2.0f,
-          8.76f, 15.2f, 37.5f,
-          3.8f, // 0x40733333 Round down
-          3.1055E+29f, // 0x707ADC3C Round up
-          9.149516E-10f, // 0x307B7FFF Round down
-          -3.8f, // 0xC0733333 Round down
-          -3.1055E+29f, // 0xF07ADC3C Round up
-          -9.149516E-10f // 0xB07B7FFF Round down
-        };
-
-    std::vector<armnn::BFloat16> expectedOutput = armnnUtils::QuantizedVector<armnn::BFloat16>(
-        {
-          -37.5f, -15.2f, -8.76f,
-          -2.0f, -1.5f, -1.3f,
-          -0.5f, -0.4f, 0.0f,
-          1.0f, 0.4f, 0.5f,
-          1.3f, 1.5f, 2.0f,
-          8.76f, 15.2f, 37.5f,
-          3.796875f, // 0x4073
-          3.1072295E29f, // 0x707B
-          9.131327E-10f, // 0x307B
-          -3.796875f, // 0xC073
-          -3.1072295E29f, // 0xF07B
-          -9.131327E-10f // 0xB07B
-        },
-        1.0f, 0);
-
-    std::vector<armnn::BFloat16> actualOutput(outputTensorInfo.GetNumElements());
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ConvertFp32ToBf16QueueDescriptor data;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::ConvertFp32ToBf16,
-                                                                                data,
-                                                                                info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle.get(), input.data());
-
-    workload->Execute();
-
-    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
-
-    return LayerTestResult<armnn::BFloat16, 4>(actualOutput,
-                                               expectedOutput,
-                                               outputHandle->GetShape(),
-                                               outputTensorInfo.GetShape());
-
-}
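
The expected values in the deleted test above encode round-to-nearest-even behaviour: 3.8f (0x40733333) maps to 0x4073 (3.796875f), while 3.1055E+29f (0x707ADC3C) rounds up to 0x707B. A minimal sketch of that rounding, for illustration only and not ArmNN's FloatingPointConverter (NaN/Inf handling omitted):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Round-to-nearest-even FP32 -> BF16, matching the hex expectations noted
    // in the deleted test above.
    static uint16_t Fp32ToBf16Bits(float value)
    {
        uint32_t bits;
        std::memcpy(&bits, &value, sizeof(bits));
        const uint32_t lsb  = (bits >> 16) & 1u; // LSB of the half that is kept
        const uint32_t bias = 0x7FFFu + lsb;     // ties round to even
        return static_cast<uint16_t>((bits + bias) >> 16);
    }

    int main()
    {
        std::printf("3.8f        -> 0x%04X\n", static_cast<unsigned>(Fp32ToBf16Bits(3.8f)));        // 0x4073
        std::printf("3.1055E+29f -> 0x%04X\n", static_cast<unsigned>(Fp32ToBf16Bits(3.1055E+29f))); // 0x707B
        return 0;
    }
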
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp
deleted file mode 100644
index c2286d9..0000000
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp
+++ /dev/null
@@ -1,18 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnnTestUtils/LayerTestResult.hpp>
-
-#include <BFloat16.hpp>
-
-#include <armnn/backends/IBackendInternal.hpp>
-#include <armnn/backends/WorkloadFactory.hpp>
-
-LayerTestResult<armnn::BFloat16, 4> ConvertFp32ToBf16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::ITensorHandleFactory& tensorHandleFactory);
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 9c40391..a61a5bb 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -247,14 +247,6 @@
             return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
         case LayerType::ConvertFp32ToFp16:
             return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
-        case LayerType::ConvertBf16ToFp32:
-            return LayerSupportBase::IsConvertBf16ToFp32Supported(infos[0],
-                                                                  infos[1],
-                                                                  reasonIfUnsupported);
-        case LayerType::ConvertFp32ToBf16:
-            return LayerSupportBase::IsConvertFp32ToBf16Supported(infos[0],
-                                                                  infos[1],
-                                                                  reasonIfUnsupported);
         case LayerType::Convolution2d:
         {
             if (infos.size() != 4)
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 7f311d8..4c97855 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -220,12 +220,8 @@
         }
         case LayerType::Constant:
             return IsConstantSupported(infos[0], reasonIfUnsupported);
-        case LayerType::ConvertBf16ToFp32:
-            return IsConvertBf16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
         case LayerType::ConvertFp16ToFp32:
             return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
-        case LayerType::ConvertFp32ToBf16:
-            return IsConvertFp32ToBf16Supported(infos[0], infos[1], reasonIfUnsupported);
         case LayerType::ConvertFp32ToFp16:
             return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
         case LayerType::Convolution2d:
@@ -765,16 +761,6 @@
                                    output);
 }
 
-bool NeonLayerSupport::IsConvertBf16ToFp32Supported(const TensorInfo& input,
-                                                    const TensorInfo& output,
-                                                    Optional<std::string&> reasonIfUnsupported) const
-{
-    armnn::IgnoreUnused(input);
-    armnn::IgnoreUnused(output);
-    armnn::IgnoreUnused(reasonIfUnsupported);
-    return true;
-}
-
 bool NeonLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     Optional<std::string&> reasonIfUnsupported) const
@@ -785,16 +771,6 @@
     return true;
 }
 
-bool NeonLayerSupport::IsConvertFp32ToBf16Supported(const TensorInfo& input,
-                                                    const TensorInfo& output,
-                                                    Optional<std::string&> reasonIfUnsupported) const
-{
-    armnn::IgnoreUnused(input);
-    armnn::IgnoreUnused(output);
-    armnn::IgnoreUnused(reasonIfUnsupported);
-    return true;
-}
-
 bool NeonLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index e916162..374a904 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -84,18 +84,10 @@
     bool IsConstantSupported(const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    bool IsConvertBf16ToFp32Supported(const TensorInfo& input,
-                                      const TensorInfo& output,
-                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    bool IsConvertFp32ToBf16Supported(const TensorInfo& input,
-                                      const TensorInfo& output,
-                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index d5a7c68..dccd4a3 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -194,24 +194,12 @@
             auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
             return std::make_unique<NeonConstantWorkload>(*constantQueueDescriptor, info);
         }
-        case LayerType::ConvertBf16ToFp32 :
-        {
-            auto convertBf16ToFp32QueueDescriptor
-                    = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
-            return std::make_unique<NeonConvertBf16ToFp32Workload>(*convertBf16ToFp32QueueDescriptor, info);
-        }
         case LayerType::ConvertFp16ToFp32 :
         {
             auto convertFp16ToFp32QueueDescriptor
                     = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
             return std::make_unique<NeonConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
         }
-        case LayerType::ConvertFp32ToBf16 :
-        {
-            auto convertFp32ToBf16QueueDescriptor
-                    = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
-            return std::make_unique<NeonConvertFp32ToBf16Workload>(*convertFp32ToBf16QueueDescriptor, info);
-        }
         case LayerType::ConvertFp32ToFp16 :
         {
             auto convertFp32ToFp16QueueDescriptor
@@ -655,13 +643,6 @@
     return std::make_unique<NeonConstantWorkload>(descriptor, info);
 }
 
-std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertBf16ToFp32(
-    const ConvertBf16ToFp32QueueDescriptor& descriptor,
-    const WorkloadInfo& info) const
-{
-    return std::make_unique<NeonConvertBf16ToFp32Workload>(descriptor, info);
-}
-
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp16ToFp32(
     const ConvertFp16ToFp32QueueDescriptor& descriptor,
     const WorkloadInfo& info) const
@@ -669,13 +650,6 @@
     return std::make_unique<NeonConvertFp16ToFp32Workload>(descriptor, info);
 }
 
-std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp32ToBf16(
-    const ConvertFp32ToBf16QueueDescriptor& descriptor,
-    const WorkloadInfo& info) const
-{
-    return std::make_unique<NeonConvertFp32ToBf16Workload>(descriptor, info);
-}
-
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp32ToFp16(
     const ConvertFp32ToFp16QueueDescriptor& descriptor,
     const WorkloadInfo& info) const
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index 0c11608..e4f5459 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -108,21 +108,11 @@
 
     ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
     "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08")
-    std::unique_ptr<IWorkload> CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& descriptor,
-                                                       const WorkloadInfo& info) const override;
-
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
-    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08")
     std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
                                                        const WorkloadInfo& info) const override;
 
     ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
     "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08")
-    std::unique_ptr<IWorkload> CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& descriptor,
-                                                       const WorkloadInfo& info) const override;
-
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
-    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08")
     std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
                                                        const WorkloadInfo& info) const override;
 
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index b1c0103..bbc5554 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -34,8 +34,6 @@
         workloads/NeonComparisonWorkload.cpp \
         workloads/NeonConcatWorkload.cpp \
         workloads/NeonConstantWorkload.cpp \
-        workloads/NeonConvertBf16ToFp32Workload.cpp \
-        workloads/NeonConvertFp32ToBf16Workload.cpp \
         workloads/NeonConvertFp16ToFp32Workload.cpp \
         workloads/NeonConvertFp32ToFp16Workload.cpp \
         workloads/NeonConvolution2dWorkload.cpp \
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 88e513e..2512821 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -743,12 +743,6 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint8DifferentInputOutputQParam,
                      ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>, false)
 
-// Convert from BFloat16 to Float32
-ARMNN_AUTO_TEST_CASE_WITH_THF(ConvertBf16ToFp32, ConvertBf16ToFp32Test)
-
-// Convert from Float32 to BFloat16
-ARMNN_AUTO_TEST_CASE_WITH_THF(ConvertFp32ToBf16, ConvertFp32ToBf16Test)
-
 // Fully Connected
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false)
@@ -798,7 +792,6 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1Signed32, RankDimSize1Test<DataType::Signed32>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1QSymmS16, RankDimSize1Test<DataType::QSymmS16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1QAsymmS8, RankDimSize1Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1BFloat16, RankDimSize1Test<DataType::BFloat16>)
 
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2Float16,  RankDimSize2Test<DataType::Float16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2Float32,  RankDimSize2Test<DataType::Float32>)
@@ -806,7 +799,6 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2Signed32, RankDimSize2Test<DataType::Signed32>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2QSymmS16, RankDimSize2Test<DataType::QSymmS16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2QAsymmS8, RankDimSize2Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2BFloat16, RankDimSize2Test<DataType::BFloat16>)
 
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3Float16,  RankDimSize3Test<DataType::Float16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3Float32,  RankDimSize3Test<DataType::Float32>)
@@ -814,7 +806,6 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3Signed32, RankDimSize3Test<DataType::Signed32>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3QSymmS16, RankDimSize3Test<DataType::QSymmS16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3QAsymmS8, RankDimSize3Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3BFloat16, RankDimSize3Test<DataType::BFloat16>)
 
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4Float16,  RankDimSize4Test<DataType::Float16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4Float32,  RankDimSize4Test<DataType::Float32>)
@@ -822,7 +813,6 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4Signed32, RankDimSize4Test<DataType::Signed32>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4QSymmS16, RankDimSize4Test<DataType::QSymmS16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4QAsymmS8, RankDimSize4Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4BFloat16, RankDimSize4Test<DataType::BFloat16>)
 
 // InstanceNormalization
 ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat32Nchw, InstanceNormFloat32Test, DataLayout::NCHW);
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index dd09ecf..a3eb883 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -28,12 +28,8 @@
     NeonConcatWorkload.hpp
     NeonConstantWorkload.cpp
     NeonConstantWorkload.hpp
-    NeonConvertBf16ToFp32Workload.cpp
-    NeonConvertBf16ToFp32Workload.hpp
     NeonConvertFp16ToFp32Workload.cpp
     NeonConvertFp16ToFp32Workload.hpp
-    NeonConvertFp32ToBf16Workload.cpp
-    NeonConvertFp32ToBf16Workload.hpp
     NeonConvertFp32ToFp16Workload.cpp
     NeonConvertFp32ToFp16Workload.hpp
     NeonConvolution2dWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.cpp b/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.cpp
deleted file mode 100644
index 7a2ff9a..0000000
--- a/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NeonConvertBf16ToFp32Workload.hpp"
-
-#include <armnnUtils/FloatingPointConverter.hpp>
-
-#include <BFloat16.hpp>
-
-#include <backendsCommon/WorkloadUtils.hpp>
-
-namespace armnn
-{
-
-NeonConvertBf16ToFp32Workload::NeonConvertBf16ToFp32Workload(const ConvertBf16ToFp32QueueDescriptor& descriptor,
-                                                             const WorkloadInfo& info)
-     : BFloat16ToFloat32Workload<ConvertBf16ToFp32QueueDescriptor>(descriptor, info)
-{
-    this->m_Data.ValidateInputsOutputs("NeonConvertBf16ToFp32Workload", 1, 1);
-    GatherTensorHandlePairs(descriptor, m_TensorHandlePairs);
-}
-
-void NeonConvertBf16ToFp32Workload::Execute() const
-{
-    ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConvertBf16ToFp32Workload_Execute", this->GetGuid());
-
-    auto convertFunc = [](uint8_t* dst, const uint8_t* src, size_t size)
-        {
-            auto input = reinterpret_cast<const BFloat16*>(src);
-            auto output = reinterpret_cast<float*>(dst);
-            size_t numElements = size/2; // 2 bytes per Bf16
-            armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(input, numElements, output);
-        };
-
-    for (const auto& pair : m_TensorHandlePairs)
-    {
-        CopyTensorContentsGeneric(pair.first, pair.second, convertFunc);
-    }
-}
-
-void NeonConvertBf16ToFp32Workload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
-{
-    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
-    this->m_Data.m_Inputs[slot] = tensorHandle;
-    try
-    {
-        Reconfigure();
-    }
-    catch(armnn::UnimplementedException& e)
-    {
-        // Cannot reconfigure, revert the slot back and throw the exception.
-        this->m_Data.m_Inputs[slot] = backupHandle;
-        throw e;
-    }
-}
-
-// Replace output tensor handle with the given TensorHandle
-void NeonConvertBf16ToFp32Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
-{
-    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
-    this->m_Data.m_Inputs[slot] = tensorHandle;
-    try
-    {
-        Reconfigure();
-    }
-    catch(armnn::UnimplementedException& e)
-    {
-        // Cannot reconfigure, revert the slot back and throw the exception.
-        this->m_Data.m_Inputs[slot] = backupHandle;
-        throw e;
-    }
-}
-
-void NeonConvertBf16ToFp32Workload::Reconfigure()
-{
-    throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
-}
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.hpp b/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.hpp
deleted file mode 100644
index 9d44ad2..0000000
--- a/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/backends/Workload.hpp>
-#include <armnn/backends/WorkloadData.hpp>
-#include <neon/workloads/NeonWorkloadUtils.hpp>
-
-namespace armnn
-{
-
-class NeonConvertBf16ToFp32Workload : public BFloat16ToFloat32Workload<ConvertBf16ToFp32QueueDescriptor>
-{
-public:
-    NeonConvertBf16ToFp32Workload(const ConvertBf16ToFp32QueueDescriptor& descriptor, const WorkloadInfo& info);
-    virtual void Execute() const override;
-    // Replace input tensor handle with the given TensorHandle
-    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
-
-    // Replace output tensor handle with the given TensorHandle
-    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
-private:
-    using TensorHandlePair = std::pair<const ITensorHandle*, ITensorHandle*>;
-    std::vector<TensorHandlePair> m_TensorHandlePairs;
-    virtual void Reconfigure();
-};
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.cpp b/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.cpp
deleted file mode 100644
index acd1a1e..0000000
--- a/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.cpp
+++ /dev/null
@@ -1,82 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NeonConvertFp32ToBf16Workload.hpp"
-
-#include <BFloat16.hpp>
-#include <Profiling.hpp>
-
-#include <armnnUtils/FloatingPointConverter.hpp>
-
-#include <backendsCommon/WorkloadUtils.hpp>
-
-namespace armnn
-{
-
-NeonConvertFp32ToBf16Workload::NeonConvertFp32ToBf16Workload(const ConvertFp32ToBf16QueueDescriptor& descriptor,
-                                                             const WorkloadInfo& info)
-    : Float32ToBFloat16Workload<ConvertFp32ToBf16QueueDescriptor>(descriptor, info)
-{
-    this->m_Data.ValidateInputsOutputs("NeonConvertFp32ToBf16Workload", 1, 1);
-    GatherTensorHandlePairs(descriptor, m_TensorHandlePairs);
-}
-
-void NeonConvertFp32ToBf16Workload::Execute() const
-{
-    ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConvertFp32ToBf16Workload_Execute", this->GetGuid());
-
-    auto convertFunc = [](uint8_t* dst, const uint8_t* src, size_t size)
-        {
-            auto input = reinterpret_cast<const float*>(src);
-            auto output = reinterpret_cast<BFloat16*>(dst);
-            size_t numElements = size/2; // 2 bytes per bf16
-            armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(input, numElements, output);
-        };
-
-    for (const auto& pair : m_TensorHandlePairs)
-    {
-        CopyTensorContentsGeneric(pair.first, pair.second, convertFunc);
-    }
-}
-
-void NeonConvertFp32ToBf16Workload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
-{
-    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
-    this->m_Data.m_Inputs[slot] = tensorHandle;
-    try
-    {
-        Reconfigure();
-    }
-    catch(armnn::UnimplementedException& e)
-    {
-        // Cannot reconfigure, revert the slot back and throw the exception.
-        this->m_Data.m_Inputs[slot] = backupHandle;
-        throw e;
-    }
-}
-
-// Replace output tensor handle with the given TensorHandle
-void NeonConvertFp32ToBf16Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
-{
-    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
-    this->m_Data.m_Inputs[slot] = tensorHandle;
-    try
-    {
-        Reconfigure();
-    }
-    catch(armnn::UnimplementedException& e)
-    {
-        // Cannot reconfigure, revert the slot back and throw the exception.
-        this->m_Data.m_Inputs[slot] = backupHandle;
-        throw e;
-    }
-}
-
-void NeonConvertFp32ToBf16Workload::Reconfigure()
-{
-    throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
-}
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.hpp b/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.hpp
deleted file mode 100644
index 2304f8a..0000000
--- a/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/backends/Workload.hpp>
-#include <armnn/backends/WorkloadData.hpp>
-#include <neon/workloads/NeonWorkloadUtils.hpp>
-
-namespace armnn
-{
-
-class NeonConvertFp32ToBf16Workload : public Float32ToBFloat16Workload<ConvertFp32ToBf16QueueDescriptor>
-{
-public:
-    NeonConvertFp32ToBf16Workload(const ConvertFp32ToBf16QueueDescriptor& descriptor, const WorkloadInfo& info);
-    virtual void Execute() const override;
-    // Replace input tensor handle with the given TensorHandle
-    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
-
-    // Replace output tensor handle with the given TensorHandle
-    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
-private:
-    using TensorHandlePair = std::pair<const ITensorHandle*, ITensorHandle*>;
-    std::vector<TensorHandlePair> m_TensorHandlePairs;
-    virtual void Reconfigure();
-};
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index c9c5421..01fd2f7 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -16,9 +16,7 @@
 #include "NeonComparisonWorkload.hpp"
 #include "NeonConcatWorkload.hpp"
 #include "NeonConstantWorkload.hpp"
-#include "NeonConvertBf16ToFp32Workload.hpp"
 #include "NeonConvertFp16ToFp32Workload.hpp"
-#include "NeonConvertFp32ToBf16Workload.hpp"
 #include "NeonConvertFp32ToFp16Workload.hpp"
 #include "NeonConvolution2dWorkload.hpp"
 #include "NeonConvolution3dWorkload.hpp"
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 4090901..669c91d 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -120,12 +120,8 @@
         }
         case LayerType::Constant:
             return IsConstantSupported(infos[0], reasonIfUnsupported);
-        case LayerType::ConvertBf16ToFp32:
-            return IsConvertBf16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
         case LayerType::ConvertFp16ToFp32:
             return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
-        case LayerType::ConvertFp32ToBf16:
-            return IsConvertFp32ToBf16Supported(infos[0], infos[1], reasonIfUnsupported);
         case LayerType::ConvertFp32ToFp16:
             return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
         case LayerType::Convolution2d:
@@ -518,7 +514,6 @@
 
     // Define supported types.
     std::array<DataType,6> supportedTypes = {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -585,7 +580,6 @@
     bool supported = true;
 
     std::array<DataType,7> supportedTypes = {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -623,7 +617,6 @@
 
     std::array<DataType, 8> supportedInputTypes =
     {
-        DataType::BFloat16,
         DataType::Float16,
         DataType::Float32,
         DataType::QAsymmS8,
@@ -658,7 +651,6 @@
 
     std::array<DataType, 6> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float16,
         DataType::Float32,
         DataType::QAsymmS8,
@@ -707,7 +699,6 @@
 
     std::array<DataType, 6> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -757,7 +748,6 @@
     // Define supported types.
     std::array<DataType,6> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -797,7 +787,6 @@
 {
     std::array<DataType, 9> supportedInputTypes =
             {
-                    DataType::BFloat16,
                     DataType::Float32,
                     DataType::Float16,
                     DataType::QSymmS8,
@@ -832,7 +821,6 @@
     // Define supported output and inputs types.
     std::array<DataType, 7> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -864,7 +852,6 @@
     std::array<DataType, 8> supportedInputTypes =
     {
         DataType::Boolean,
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -896,7 +883,6 @@
     bool supported = true;
     std::array<DataType,7> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -925,7 +911,6 @@
 {
     std::array<DataType,8> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float16,
         DataType::Float32,
         DataType::QAsymmS8,
@@ -939,21 +924,6 @@
                                   "Reference constant: output is not a supported type.");
 }
 
-bool RefLayerSupport::IsConvertBf16ToFp32Supported(const TensorInfo& input,
-                                                   const TensorInfo& output,
-                                                   Optional<std::string&> reasonIfUnsupported) const
-{
-    bool supported = true;
-
-    supported &= CheckSupportRule(TypeIs(input, DataType::BFloat16), reasonIfUnsupported,
-                                  "Reference for ConvertBf16ToFp32 layer: input type not supported");
-
-    supported &= CheckSupportRule(TypeIs(output, DataType::Float32), reasonIfUnsupported,
-                                  "Reference for ConvertBf16ToFp32 layer: output type not supported");
-
-    return supported;
-}
-
 bool RefLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    Optional<std::string&> reasonIfUnsupported) const
@@ -974,21 +944,6 @@
                                           &FalseFuncU8<>));
 }
 
-bool RefLayerSupport::IsConvertFp32ToBf16Supported(const TensorInfo& input,
-                                                   const TensorInfo& output,
-                                                   Optional<std::string&> reasonIfUnsupported) const
-{
-    bool supported = true;
-
-    supported &= CheckSupportRule(TypeIs(input, DataType::Float32), reasonIfUnsupported,
-                                  "Reference for ConvertFp32ToBf16 layer: input type not supported");
-
-    supported &= CheckSupportRule(TypeIs(output, DataType::BFloat16), reasonIfUnsupported,
-                                  "Reference for ConvertFp32ToBf16 layer: output type not supported");
-
-    return supported;
-}
-
 bool RefLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    Optional<std::string&> reasonIfUnsupported) const
@@ -1021,7 +976,6 @@
     // Define supported types.
     std::array<DataType,7> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -1036,20 +990,9 @@
     supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                   "Reference Convolution2d: output is not a supported type.");
 
-    // For Convolution2d, we allow to have BFloat16 input with Float32 output for optimization.
-    if (input.GetDataType() == DataType::BFloat16)
-    {
-        if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32)
-        {
-            reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
-            supported = false;
-        }
-    }
-    else
-    {
-        supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
-                                  "Reference Convolution2d: input and output types mismatched.");
-    }
+    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+                              "Reference Convolution2d: input and output types mismatched.");
+
 
     const DataType inputType = input.GetDataType();
     if (IsQuantized8BitType(inputType))
@@ -1077,7 +1020,6 @@
     {
         std::array<DataType,4> biasesSupportedTypes =
         {
-            DataType::BFloat16,
             DataType::Float32,
             DataType::Float16,
             DataType::Signed32
@@ -1103,7 +1045,6 @@
     // Define supported types.
     std::array<DataType,7> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -1147,7 +1088,6 @@
     {
         std::array<DataType,4> biasesSupportedTypes =
         {
-            DataType::BFloat16,
             DataType::Float32,
             DataType::Float16,
             DataType::Signed32
@@ -1201,7 +1141,6 @@
 
     std::array<DataType,6> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -1234,7 +1173,6 @@
     // Define supported types.
     std::array<DataType,7> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -1279,7 +1217,6 @@
     {
         std::array<DataType,4> biasesSupportedTypes =
         {
-            DataType::BFloat16,
             DataType::Float32,
             DataType::Float16,
             DataType::Signed32
@@ -1313,7 +1250,6 @@
                                   "Reference for Dequantize layer: per-axis quantized input not supported.");
 
     std::array<DataType,3> supportedOutputTypes = {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16
     };
@@ -1344,7 +1280,6 @@
 
     std::array<DataType,6> supportedInputTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -1379,7 +1314,6 @@
     bool supported = true;
 
     std::array<DataType,7> supportedTypes = {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -1418,7 +1352,6 @@
 
     std::array<DataType, 7> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -1513,7 +1446,6 @@
 
     std::array<DataType,3> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16
     };
@@ -1539,7 +1471,6 @@
     // Define supported types.
     std::array<DataType,6> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -1556,20 +1487,8 @@
     supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
                                   "Reference Fully Connected: weights type not supported.");
 
-    // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization.
-    if (input.GetDataType() == DataType::BFloat16)
-    {
-        if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32)
-        {
-            reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
-            supported = false;
-        }
-    }
-    else
-    {
-        supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
-                                  "Reference Fully Connected: input and output types mismatched.");
-    }
+    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+                              "Reference Fully Connected: input and output types mismatched.");
 
     supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
                                   "Reference Fully Connected: weights is not a supported type.");
@@ -1583,7 +1502,6 @@
         std::array<DataType, 5>
         supportedBiasTypes =
         {
-            DataType::BFloat16,
             DataType::Float32,
             DataType::Float16,
             DataType::Signed32,
@@ -1615,7 +1533,6 @@
     bool supported = true;
     std::array<DataType,7> supportedTypes =
     {
-            DataType::BFloat16,
             DataType::Float32,
             DataType::Float16,
             DataType::QAsymmS8,
@@ -1648,7 +1565,6 @@
     bool supported = true;
     std::array<DataType,7> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -1692,7 +1608,6 @@
     // Define supported types
     std::array<DataType, 3> supportedTypes =
         {
-            DataType::BFloat16,
             DataType::Float32,
             DataType::Float16
         };
@@ -1724,7 +1639,6 @@
     // Define supported types
     std::array<DataType, 6> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -1784,7 +1698,6 @@
 
     std::array<DataType, 3> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16
     };
@@ -1819,7 +1732,6 @@
     bool supported = true;
 
     std::array<DataType,3> supportedTypes = {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::QSymmS16
     };
@@ -1922,7 +1834,6 @@
     bool supported = true;
 
     std::array<DataType,7> supportedTypes = {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -1963,7 +1874,6 @@
 
     std::array<DataType,6> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -2052,7 +1962,6 @@
     bool supported = true;
 
     std::array<DataType,7> supportedTypes = {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -2090,7 +1999,6 @@
     bool supported = true;
 
     std::array<DataType,7> supportedTypes = {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -2130,7 +2038,6 @@
     // Define supported types
     std::array<DataType, 6> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float16,
         DataType::Float32,
         DataType::QAsymmS8,
@@ -2170,7 +2077,6 @@
     // Define supported output and inputs types.
     std::array<DataType,6> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -2232,7 +2138,6 @@
     // Define supported output and inputs types.
     std::array<DataType,6> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -2263,7 +2168,6 @@
     // Define supported output and inputs types.
     std::array<DataType,6> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -2316,7 +2220,6 @@
 
     // Define supported input types.
     std::array<DataType,7> supportedInputTypes = {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -2368,7 +2271,6 @@
     bool supported = true;
     std::array<DataType,7> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -2470,7 +2372,6 @@
 
     std::array<DataType, 5> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::QAsymmS8,
         DataType::QAsymmU8,
@@ -2498,7 +2399,6 @@
     bool supported = true;
     std::array<DataType,7> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QSymmS8,
@@ -2528,7 +2428,6 @@
     bool supported = true;
     std::array<DataType,6> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -2559,7 +2458,6 @@
 
     std::array<DataType,6> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -2588,7 +2486,6 @@
     bool supported = true;
     std::array<DataType,6> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -2620,7 +2517,6 @@
     bool supported = true;
     std::array<DataType,7> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -2654,7 +2550,6 @@
 
     std::array<DataType,5> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::QAsymmS8,
         DataType::QAsymmU8,
@@ -2681,7 +2576,6 @@
     bool supported = true;
 
     std::array<DataType,7> supportedTypes = {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -2720,7 +2614,6 @@
 
     std::array<DataType, 6> supportedTypes
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -2758,7 +2651,6 @@
 
     std::array<DataType,7> supportedTypes =
     {
-        DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmS8,
@@ -2804,7 +2696,6 @@
     {
         std::array<DataType,4> biasesSupportedTypes =
         {
-            DataType::BFloat16,
             DataType::Float32,
             DataType::Float16,
             DataType::Signed32
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index b64244d..f0e9e35 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -77,17 +77,10 @@
     bool IsConstantSupported(const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    bool IsConvertBf16ToFp32Supported(const TensorInfo& input,
-                                      const TensorInfo& output,
-                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    bool IsConvertFp32ToBf16Supported(const TensorInfo& input,
-                                      const TensorInfo& output,
-                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                       const TensorInfo& output,
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 093d0d5..69f75ca 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -212,24 +212,12 @@
             auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
             return std::make_unique<RefConstantWorkload>(*constantQueueDescriptor, info);
         }
-        case LayerType::ConvertBf16ToFp32 :
-        {
-            auto convertBf16ToFp32QueueDescriptor
-                = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
-            return std::make_unique<RefConvertBf16ToFp32Workload>(*convertBf16ToFp32QueueDescriptor, info);
-        }
         case LayerType::ConvertFp16ToFp32:
         {
             auto convertFp16ToFp32QueueDescriptor
                     = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
             return std::make_unique<RefConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
         }
-        case LayerType::ConvertFp32ToBf16:
-        {
-            auto convertFp32ToBf16QueueDescriptor
-                    = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
-            return std::make_unique<RefConvertFp32ToBf16Workload>(*convertFp32ToBf16QueueDescriptor, info);
-        }
         case LayerType::ConvertFp32ToFp16:
         {
             auto convertFp32ToFp16QueueDescriptor
@@ -724,13 +712,6 @@
     return std::make_unique<RefConstantWorkload>(descriptor, info);
 }
 
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertBf16ToFp32(
-    const ConvertBf16ToFp32QueueDescriptor& descriptor,
-    const WorkloadInfo& info) const
-{
-    return std::make_unique<RefConvertBf16ToFp32Workload>(descriptor, info);
-}
-
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertFp16ToFp32(
     const ConvertFp16ToFp32QueueDescriptor& descriptor,
     const WorkloadInfo& info) const
@@ -738,13 +719,6 @@
     return std::make_unique<RefConvertFp16ToFp32Workload>(descriptor, info);
 }
 
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertFp32ToBf16(
-    const ConvertFp32ToBf16QueueDescriptor& descriptor,
-    const WorkloadInfo& info) const
-{
-    return std::make_unique<RefConvertFp32ToBf16Workload>(descriptor, info);
-}
-
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertFp32ToFp16(
     const ConvertFp32ToFp16QueueDescriptor& descriptor,
     const WorkloadInfo& info) const
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 53d0806..22dc35a 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -122,21 +122,11 @@
 
     ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
     "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08")
-    std::unique_ptr<IWorkload> CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& descriptor,
-                                                       const WorkloadInfo& info) const override;
-
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
-    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08")
     std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
                                                        const WorkloadInfo& info) const override;
 
     ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
     "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08")
-    std::unique_ptr<IWorkload> CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& descriptor,
-                                                       const WorkloadInfo& info) const override;
-
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
-    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08")
     std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
                                                        const WorkloadInfo& info) const override;
 
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index ed942e6..eb2ec2d 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -58,9 +58,7 @@
         workloads/RefComparisonWorkload.cpp \
         workloads/RefConcatWorkload.cpp \
         workloads/RefConstantWorkload.cpp \
-        workloads/RefConvertBf16ToFp32Workload.cpp \
         workloads/RefConvertFp16ToFp32Workload.cpp \
-        workloads/RefConvertFp32ToBf16Workload.cpp \
         workloads/RefConvertFp32ToFp16Workload.cpp \
         workloads/RefConvolution2dWorkload.cpp \
         workloads/RefConvolution3dWorkload.cpp \
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 369e98a..a8c0634 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -634,11 +634,6 @@
     EluEndToEndTest<armnn::DataType::Float16>(defaultBackends);
 }
 
-TEST_CASE("RefEluEndToEndTestBFloat16")
-{
-    EluEndToEndTest<armnn::DataType::BFloat16>(defaultBackends);
-}
-
 TEST_CASE("RefEluEndToEndTestQAsymmS8")
 {
     EluEndToEndTest<armnn::DataType::QAsymmS8>(defaultBackends);
@@ -1006,11 +1001,6 @@
     HardSwishEndToEndTest<armnn::DataType::Float16>(defaultBackends);
 }
 
-TEST_CASE("RefHardSwishEndToEndTestBFloat16")
-{
-    HardSwishEndToEndTest<armnn::DataType::BFloat16>(defaultBackends);
-}
-
 TEST_CASE("RefHardSwishEndToEndTestQAsymmS8")
 {
     HardSwishEndToEndTest<armnn::DataType::QAsymmS8>(defaultBackends);
diff --git a/src/backends/reference/test/RefLayerSupportTests.cpp b/src/backends/reference/test/RefLayerSupportTests.cpp
index 9a27c7c..e671496 100644
--- a/src/backends/reference/test/RefLayerSupportTests.cpp
+++ b/src/backends/reference/test/RefLayerSupportTests.cpp
@@ -49,12 +49,6 @@
     CHECK(supportChecker.IsAdditionSupported(in0, in1, out, reasonNotSupported));
 }
 
-TEST_CASE("IsLayerSupportedBFloat16Reference")
-{
-    armnn::RefWorkloadFactory factory;
-    IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::BFloat16>(&factory);
-}
-
 TEST_CASE("IsLayerSupportedFloat16Reference")
 {
     armnn::RefWorkloadFactory factory;
@@ -117,70 +111,6 @@
     CHECK_EQ(reasonIfUnsupported, "Layer is not supported with float16 data type output");
 }
 
-TEST_CASE("IsConvertBf16ToFp32SupportedReference")
-{
-    std::string reasonIfUnsupported;
-
-    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertBf16ToFp32Layer,
-      armnn::DataType::BFloat16, armnn::DataType::Float32>(reasonIfUnsupported);
-
-    CHECK(result);
-}
-
-TEST_CASE("IsConvertBf16ToFp32SupportedFp32InputReference")
-{
-    std::string reasonIfUnsupported;
-
-    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertBf16ToFp32Layer,
-      armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
-
-    CHECK(!result);
-    CHECK_EQ(reasonIfUnsupported, "Reference for ConvertBf16ToFp32 layer: input type not supported\n");
-}
-
-TEST_CASE("IsConvertBf16ToFp32SupportedBf16OutputReference")
-{
-    std::string reasonIfUnsupported;
-
-    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertBf16ToFp32Layer,
-      armnn::DataType::BFloat16, armnn::DataType::BFloat16>(reasonIfUnsupported);
-
-    CHECK(!result);
-    CHECK_EQ(reasonIfUnsupported, "Reference for ConvertBf16ToFp32 layer: output type not supported\n");
-}
-
-TEST_CASE("IsConvertFp32ToBf16SupportedReference")
-{
-    std::string reasonIfUnsupported;
-
-    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToBf16Layer,
-      armnn::DataType::Float32, armnn::DataType::BFloat16>(reasonIfUnsupported);
-
-    CHECK(result);
-}
-
-TEST_CASE("IsConvertFp32ToBf16SupportedBf16InputReference")
-{
-    std::string reasonIfUnsupported;
-
-    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToBf16Layer,
-      armnn::DataType::BFloat16, armnn::DataType::BFloat16>(reasonIfUnsupported);
-
-    CHECK(!result);
-    CHECK_EQ(reasonIfUnsupported, "Reference for ConvertFp32ToBf16 layer: input type not supported\n");
-}
-
-TEST_CASE("IsConvertFp32ToBf16SupportedFp32OutputReference")
-{
-    std::string reasonIfUnsupported;
-
-    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToBf16Layer,
-      armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
-
-    CHECK(!result);
-    CHECK_EQ(reasonIfUnsupported, "Reference for ConvertFp32ToBf16 layer: output type not supported\n");
-}
-
 TEST_CASE("IsConvertFp32ToFp16SupportedReference")
 {
     std::string reasonIfUnsupported;
@@ -271,7 +201,8 @@
 
     result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
             armnn::DataType::BFloat16>(reasonIfUnsupported);
-    CHECK(result);
+    CHECK(!result);
+    CHECK(reasonIfUnsupported.find("Reference constant: output is not a supported type.") != std::string::npos);
 }
 
 }
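
The updated expectation above has the reference backend reject BFloat16 constants. A small sketch of the same probe through the public support-query API, assuming armnn/BackendHelper.hpp's GetILayerSupportByBackendId helper (an assumption, not part of this patch):

    #include <armnn/BackendHelper.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>
    #include <cstdio>
    #include <string>

    int main()
    {
        // Look up the reference backend's layer-support object by backend id.
        auto refSupport = armnn::GetILayerSupportByBackendId("CpuRef");

        const armnn::TensorInfo bf16Output({1, 4}, armnn::DataType::BFloat16);
        std::string reason;

        // With BFloat16 removed from the reference supported-type lists this is
        // expected to return false and fill in the reason string.
        const bool supported = refSupport.IsConstantSupported(bf16Output, reason);
        std::printf("BF16 constant on CpuRef: %s (%s)\n",
                    supported ? "yes" : "no", reason.c_str());
        return 0;
    }
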
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 7375847..750da8f 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -72,14 +72,6 @@
 
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dSquareNhwc, SimpleConvolution2d3x3NhwcTest, false)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3BFloat16,
-                     Convolution2d3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
-                     false,
-                     DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3NhwcBFloat16,
-                     Convolution2d3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
-                     false,
-                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3,
                      Convolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
                      false,
@@ -113,14 +105,6 @@
                      false,
                      DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3BFloat16,
-                     Convolution2d2x3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
-                     false,
-                     DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3NhwcBFloat16,
-                     Convolution2d2x3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
-                     false,
-                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3,
                      Convolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
                      false,
@@ -154,15 +138,6 @@
                      false,
                      DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3BFloat16,
-                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::BFloat16, DataType::BFloat16>,
-                     false,
-                     DataLayout::NCHW)
-
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcBFloat16,
-                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::BFloat16, DataType::BFloat16>,
-                     false,
-                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3,
                      Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::Float32, DataType::Float32>,
                      false,
@@ -199,15 +174,6 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2dPerAxisQuantTestNchw, Convolution2dPerAxisQuantTest, DataLayout::NCHW);
 ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2dPerAxisQuantTestNhwc, Convolution2dPerAxisQuantTest, DataLayout::NHWC);
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Stride2x2Bf16,
-                              Convolution2d3x3Stride2x2BFloat16Test,
-                              false,
-                              DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Stride2x2BFloat16SmallValue,
-                     Convolution2d3x3Stride2x2BFloat16SmallValueTest,
-                     false,
-                     DataLayout::NHWC);
-
 // Convolution 3d - NDHWC
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Float32,
                               SimpleConvolution3d3x3x3Float32Test,
@@ -354,14 +320,6 @@
                               DepthwiseConvolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
                               false,
                               DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d3x3Dilation3x3BFloat16,
-                              DepthwiseConvolution2d3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
-                              false,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d3x3Dilation3x3NhwcBFloat16,
-                              DepthwiseConvolution2d3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
-                              false,
-                              DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d3x3Dilation3x3Int8,
                               DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
                               false,
@@ -395,14 +353,6 @@
                               DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
                               false,
                               DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d2x3x3Dilation3x3BFloat16,
-                              DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
-                              false,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d2x3x3Dilation3x3NhwcBFloat16,
-                              DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
-                              false,
-                              DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d2x3x3Dilation3x3Int8,
                               DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
                               false,
@@ -435,14 +385,6 @@
                               DepthwiseConvolution2dMult2Test<armnn::DataType::Float32, armnn::DataType::Float32>,
                               false,
                               armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dMult4BFloat16,
-                              DepthwiseConvolution2dMult4Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>,
-                              false,
-                              armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dMult2BFloat16,
-                              DepthwiseConvolution2dMult2Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>,
-                              false,
-                              armnn::DataLayout::NCHW)
 
 ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1,
                               DepthwiseConvolution2dDepthMul1Test,
@@ -864,7 +806,6 @@
 
 // Concat
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConcat, ConcatTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatBFloat16, ConcatBFloat16Test)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatFloat16, ConcatFloat16Test)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatInt32, ConcatInt32Test)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint8, ConcatUint8Test)
@@ -1063,91 +1004,78 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(Multiplication5d, Multiplication5dTest)
 
 // Batch Mat Mul
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DSimpleBFloat16, BatchMatMul2DSimpleTest<DataType::BFloat16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DSimpleFloat32, BatchMatMul2DSimpleTest<DataType::Float32>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DSimpleFloat16, BatchMatMul2DSimpleTest<DataType::Float16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DSimpleQAsymmS8, BatchMatMul2DSimpleTest<DataType::QAsymmS8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DSimpleQAsymmU8, BatchMatMul2DSimpleTest<DataType::QAsymmU8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DSimpleQASymmS16, BatchMatMul2DSimpleTest<DataType::QSymmS16>);
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DSimpleBFloat16, BatchMatMul3DSimpleTest<DataType::BFloat16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DSimpleFloat32, BatchMatMul3DSimpleTest<DataType::Float32>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DSimpleFloat16, BatchMatMul3DSimpleTest<DataType::Float16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DSimpleQAsymmS8, BatchMatMul3DSimpleTest<DataType::QAsymmS8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DSimpleQAsymmU8, BatchMatMul3DSimpleTest<DataType::QAsymmU8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DSimpleQASymmS16, BatchMatMul3DSimpleTest<DataType::QSymmS16>);
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNCHWSimpleBFloat16, BatchMatMulNCHWSimpleTest<DataType::BFloat16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNCHWSimpleFloat32, BatchMatMulNCHWSimpleTest<DataType::Float32>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNCHWSimpleFloat16, BatchMatMulNCHWSimpleTest<DataType::Float16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNCHWSimpleQAsymmS8, BatchMatMulNCHWSimpleTest<DataType::QAsymmS8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNCHWSimpleQAsymmU8, BatchMatMulNCHWSimpleTest<DataType::QAsymmU8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNCHWSimpleQASymmS16, BatchMatMulNCHWSimpleTest<DataType::QSymmS16>);
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCSimpleBFloat16, BatchMatMulNHWCSimpleTest<DataType::BFloat16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCSimpleFloat32, BatchMatMulNHWCSimpleTest<DataType::Float32>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCSimpleFloat16, BatchMatMulNHWCSimpleTest<DataType::Float16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCSimpleQAsymmS8, BatchMatMulNHWCSimpleTest<DataType::QAsymmS8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCSimpleQAsymmU8, BatchMatMulNHWCSimpleTest<DataType::QAsymmU8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCSimpleQASymmS16, BatchMatMulNHWCSimpleTest<DataType::QSymmS16>);
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBatchBFloat16, BatchMatMul3DBatchTest<DataType::BFloat16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBatchFloat32, BatchMatMul3DBatchTest<DataType::Float32>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBatchFloat16, BatchMatMul3DBatchTest<DataType::Float16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBatchQAsymmS8, BatchMatMul3DBatchTest<DataType::QAsymmS8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBatchQAsymmU8, BatchMatMul3DBatchTest<DataType::QAsymmU8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBatchQASymmS16, BatchMatMul3DBatchTest<DataType::QSymmS16>);
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBroadcastBFloat16, BatchMatMul3DBroadcastTest<DataType::BFloat16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBroadcastFloat32, BatchMatMul3DBroadcastTest<DataType::Float32>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBroadcastFloat16, BatchMatMul3DBroadcastTest<DataType::Float16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBroadcastQAsymmS8, BatchMatMul3DBroadcastTest<DataType::QAsymmS8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBroadcastQAsymmU8, BatchMatMul3DBroadcastTest<DataType::QAsymmU8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBroadcastQASymmS16, BatchMatMul3DBroadcastTest<DataType::QSymmS16>);
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3D2DBroadcastBFloat16, BatchMatMul3D2DBroadcastTest<DataType::BFloat16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3D2DBroadcastFloat32, BatchMatMul3D2DBroadcastTest<DataType::Float32>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3D2DBroadcastFloat16, BatchMatMul3D2DBroadcastTest<DataType::Float16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3D2DBroadcastQAsymmS8, BatchMatMul3D2DBroadcastTest<DataType::QAsymmS8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3D2DBroadcastQAsymmU8, BatchMatMul3D2DBroadcastTest<DataType::QAsymmU8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3D2DBroadcastQASymmSS16, BatchMatMul3D2DBroadcastTest<DataType::QSymmS16>);
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNDHWCNHWCBFloat16, BatchMatMulNDHWCNHWCTest<DataType::BFloat16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNDHWCNHWCFloat32, BatchMatMulNDHWCNHWCTest<DataType::Float32>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNDHWCNHWCFloat16, BatchMatMulNDHWCNHWCTest<DataType::Float16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNDHWCNHWCQAsymmS8, BatchMatMulNDHWCNHWCTest<DataType::QAsymmS8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNDHWCNHWCQAsymmU8, BatchMatMulNDHWCNHWCTest<DataType::QAsymmU8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNDHWCNHWCQASymmSS16, BatchMatMulNDHWCNHWCTest<DataType::QSymmS16>);
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTinyBFloat16, BatchMatMul2DTinyTest<DataType::BFloat16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTinyFloat32, BatchMatMul2DTinyTest<DataType::Float32>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTinyFloat16, BatchMatMul2DTinyTest<DataType::Float16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTinyQAsymmS8, BatchMatMul2DTinyTest<DataType::QAsymmS8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTinyQAsymmU8, BatchMatMul2DTinyTest<DataType::QAsymmU8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTinyQASymmS16, BatchMatMul2DTinyTest<DataType::QSymmS16>);
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DNonSquareBFloat16, BatchMatMul3DNonSquareTest<DataType::BFloat16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DNonSquareFloat32, BatchMatMul3DNonSquareTest<DataType::Float32>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DNonSquareFloat16, BatchMatMul3DNonSquareTest<DataType::Float16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DNonSquareQAsymmS8, BatchMatMul3DNonSquareTest<DataType::QAsymmS8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DNonSquareQAsymmU8, BatchMatMul3DNonSquareTest<DataType::QAsymmU8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DNonSquareQASymmS16, BatchMatMul3DNonSquareTest<DataType::QSymmS16>);
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTranspSimpleBFloat16, BatchMatMul2DTranspSimpleTest<DataType::BFloat16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTranspSimpleFloat32, BatchMatMul2DTranspSimpleTest<DataType::Float32>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTranspSimpleFloat16, BatchMatMul2DTranspSimpleTest<DataType::Float16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTranspSimpleQAsymmS8, BatchMatMul2DTranspSimpleTest<DataType::QAsymmS8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTranspSimpleQAsymmU8, BatchMatMul2DTranspSimpleTest<DataType::QAsymmU8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTranspSimpleQASymmS16,BatchMatMul2DTranspSimpleTest<DataType::QSymmS16>);
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DAdjointSimpleBFloat16, BatchMatMul2DAdjointSimpleTest<DataType::BFloat16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DAdjointSimpleFloat32, BatchMatMul2DAdjointSimpleTest<DataType::Float32>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DAdjointSimpleFloat16, BatchMatMul2DAdjointSimpleTest<DataType::Float16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DAdjointSimpleQAsymmS8, BatchMatMul2DAdjointSimpleTest<DataType::QAsymmS8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DAdjointSimpleQAsymmU8, BatchMatMul2DAdjointSimpleTest<DataType::QAsymmU8>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DAdjointSimpleQASymmS16,BatchMatMul2DAdjointSimpleTest<DataType::QSymmS16>);
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCParamsBFloat16, BatchMatMulNHWCParamsTest<DataType::BFloat16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCParamsFloat32, BatchMatMulNHWCParamsTest<DataType::Float32>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCParamsFloat16, BatchMatMulNHWCParamsTest<DataType::Float16>);
 ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCParamsQAsymmS8, BatchMatMulNHWCParamsTest<DataType::QAsymmS8>);
@@ -1172,7 +1100,6 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1QSymmS16, RankDimSize1Test<DataType::QSymmS16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1QSymmS8,  RankDimSize1Test<DataType::QSymmS8>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1QAsymmS8, RankDimSize1Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1BFloat16, RankDimSize1Test<DataType::BFloat16>)
 
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2Float16,  RankDimSize2Test<DataType::Float16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2Float32,  RankDimSize2Test<DataType::Float32>)
@@ -1181,7 +1108,6 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2QSymmS16, RankDimSize2Test<DataType::QSymmS16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2QSymmS8,  RankDimSize2Test<DataType::QSymmS8>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2QAsymmS8, RankDimSize2Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2BFloat16, RankDimSize2Test<DataType::BFloat16>)
 
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3Float16,  RankDimSize3Test<DataType::Float16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3Float32,  RankDimSize3Test<DataType::Float32>)
@@ -1190,7 +1116,6 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3QSymmS16, RankDimSize3Test<DataType::QSymmS16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3QSymmS8,  RankDimSize3Test<DataType::QSymmS8>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3QAsymmS8, RankDimSize3Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3BFloat16, RankDimSize3Test<DataType::BFloat16>)
 
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4Float16,  RankDimSize4Test<DataType::Float16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4Float32,  RankDimSize4Test<DataType::Float32>)
@@ -1199,7 +1124,6 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4QSymmS16, RankDimSize4Test<DataType::QSymmS16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4QSymmS8,  RankDimSize4Test<DataType::QSymmS8>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4QAsymmS8, RankDimSize4Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4BFloat16, RankDimSize4Test<DataType::BFloat16>)
 
 // Resize Bilinear - NCHW
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeBilinear,
@@ -1650,11 +1574,6 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(LogSoftmaxFloat16_4, LogSoftmaxTest4<DataType::Float16>)
 
 // Pad - Constant
-ARMNN_AUTO_TEST_CASE_WITH_THF(PadBFloat162d, PadBFloat162dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PadBFloat162dCustomPadding, PadBFloat162dCustomPaddingTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PadBFloat163d, PadBFloat163dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PadBFloat164d, PadBFloat164dTest)
-
 ARMNN_AUTO_TEST_CASE_WITH_THF(PadFloat322d, PadFloat322dTest)
 ARMNN_AUTO_TEST_CASE_WITH_THF(PadFloat322dCustomPadding, PadFloat322dCustomPaddingTest)
 ARMNN_AUTO_TEST_CASE_WITH_THF(PadFloat323d, PadFloat323dTest)
@@ -1692,8 +1611,6 @@
 
 ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dFloat32, PadSymmetric4dFloat32Test)
 ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dFloat32, PadReflect4dFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dBFloat16, PadSymmetric4dBFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dBFloat16, PadReflect4dBFloat16Test)
 ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dUint8, PadSymmetric4dUint8Test)
 ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dUint8, PadReflect4dUint8Test)
 ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dInt8, PadSymmetric4dInt8Test)
@@ -1878,17 +1795,10 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(CastInt8ToUInt, CastInt8ToUInt82dTest)
 ARMNN_AUTO_TEST_CASE_WITH_THF(CastInt8AsymmToUInt, CastInt8AsymmToUInt82dTest)
 ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloat16ToFloat32, CastFloat16ToFloat322dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(CastBFloat16ToFloat32, CastBFloat16ToFloat322dTest)
 ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloatToFloat16, CastFloat32ToFloat162dTest)
 ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloatToIn8, CastFloat32ToInt82dTest)
 ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloatToUInt8, CastFloat32ToUInt82dTest)
 
-// Convert from BFloat16 to Float32
-ARMNN_AUTO_TEST_CASE_WITH_THF(ConvertBf16ToFp32, ConvertBf16ToFp32Test)
-
-// Convert from Float32 to BFloat16
-ARMNN_AUTO_TEST_CASE_WITH_THF(ConvertFp32ToBf16, ConvertFp32ToBf16Test)
-
 // Convert from Float16 to Float32
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvertFp16ToFp32, SimpleConvertFp16ToFp32Test)
 // Convert from Float32 to Float16
@@ -2139,7 +2049,6 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize1QSymmS16, ShapeDimSize1Test<DataType::QSymmS16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize1QSymmS8,  ShapeDimSize1Test<DataType::QSymmS8>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize1QAsymmS8, ShapeDimSize1Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize1BFloat16, ShapeDimSize1Test<DataType::BFloat16>)
 
 ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize2Float16,  ShapeDimSize2Test<DataType::Float16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize2Float32,  ShapeDimSize2Test<DataType::Float32>)
@@ -2148,7 +2057,6 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize2QSymmS16, ShapeDimSize2Test<DataType::QSymmS16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize2QSymmS8,  ShapeDimSize2Test<DataType::QSymmS8>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize2QAsymmS8, ShapeDimSize2Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize2BFloat16, ShapeDimSize2Test<DataType::BFloat16>)
 
 ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize3Float16,  ShapeDimSize3Test<DataType::Float16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize3Float32,  ShapeDimSize3Test<DataType::Float32>)
@@ -2157,7 +2065,6 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize3QSymmS16, ShapeDimSize3Test<DataType::QSymmS16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize3QSymmS8,  ShapeDimSize3Test<DataType::QSymmS8>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize3QAsymmS8, ShapeDimSize3Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize3BFloat16, ShapeDimSize3Test<DataType::BFloat16>)
 
 ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize4Float16,  ShapeDimSize4Test<DataType::Float16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize4Float32,  ShapeDimSize4Test<DataType::Float32>)
@@ -2166,7 +2073,6 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize4QSymmS16, ShapeDimSize4Test<DataType::QSymmS16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize4QSymmS8,  ShapeDimSize4Test<DataType::QSymmS8>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize4QAsymmS8, ShapeDimSize4Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize4BFloat16, ShapeDimSize4Test<DataType::BFloat16>)
 
 // SpaceToDepth
 ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToDepthNchwAsymmQ8, SpaceToDepthNchwAsymmQ8Test)
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index e09371f..2d27951 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -260,44 +260,6 @@
 
 };
 
-class BFloat16Decoder : public TypedIterator<const BFloat16, Decoder<float>>
-{
-public:
-    BFloat16Decoder(const BFloat16* data)
-        : TypedIterator(data) {}
-
-    BFloat16Decoder()
-        : BFloat16Decoder(nullptr) {}
-
-    float Get() const override
-    {
-        float val = 0.f;
-        armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(m_Iterator, 1, &val);
-        return val;
-    }
-    std::vector<float> DecodeTensor (const TensorShape& tensorShape,
-                                     const bool isDepthwise) override
-    {
-        IgnoreUnused(isDepthwise);
-
-        const unsigned int size = tensorShape.GetNumElements();
-        std::vector<float> decodedTensor;
-        decodedTensor.reserve(size);
-
-        for (uint32_t i = 0; i < size; ++i)
-        {
-            this->operator[](i);
-
-            float val = 0.f;
-            armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(m_Iterator, 1, &val);
-            decodedTensor.emplace_back(val);
-        }
-
-        return decodedTensor;
-    }
-
-};
-
 class Float16Decoder : public TypedIterator<const Half, Decoder<float>>
 {
 public:
@@ -624,28 +586,6 @@
     const int32_t m_Offset;
 };
 
-class BFloat16Encoder : public TypedIterator<armnn::BFloat16, Encoder<float>>
-{
-public:
-    BFloat16Encoder(armnn::BFloat16* data)
-        : TypedIterator(data) {}
-
-    BFloat16Encoder()
-        : BFloat16Encoder(nullptr) {}
-
-    void Set(float right) override
-    {
-        armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(&right, 1, m_Iterator);
-    }
-
-    float Get() const override
-    {
-        float val = 0.f;
-        armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(m_Iterator, 1, &val);
-        return val;
-    }
-};
-
 class Float16Encoder : public TypedIterator<Half, Encoder<float>>
 {
 public:
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index b8835e3..de6c042 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -88,12 +88,8 @@
     RefConcatWorkload.hpp
     RefConstantWorkload.cpp
     RefConstantWorkload.hpp
-    RefConvertBf16ToFp32Workload.cpp
-    RefConvertBf16ToFp32Workload.hpp
     RefConvertFp16ToFp32Workload.cpp
     RefConvertFp16ToFp32Workload.hpp
-    RefConvertFp32ToBf16Workload.cpp
-    RefConvertFp32ToBf16Workload.hpp
     RefConvertFp32ToFp16Workload.cpp
     RefConvertFp32ToFp16Workload.hpp
     RefConvolution2dWorkload.cpp
diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp
index c2a456b..54e7008 100644
--- a/src/backends/reference/workloads/Decoders.hpp
+++ b/src/backends/reference/workloads/Decoders.hpp
@@ -88,10 +88,6 @@
                 info.GetQuantizationScale(),
                 info.GetQuantizationOffset());
         }
-        case DataType::BFloat16:
-        {
-            return std::make_unique<BFloat16Decoder>(static_cast<const BFloat16*>(data));
-        }
         case DataType::Float16:
         {
             return std::make_unique<Float16Decoder>(static_cast<const Half*>(data));
diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp
index a7be9e1..d6d6114 100644
--- a/src/backends/reference/workloads/Encoders.hpp
+++ b/src/backends/reference/workloads/Encoders.hpp
@@ -65,10 +65,6 @@
         {
             return std::make_unique<Int32Encoder>(static_cast<int32_t*>(data));
         }
-        case armnn::DataType::BFloat16:
-        {
-            return std::make_unique<BFloat16Encoder>(static_cast<armnn::BFloat16*>(data));
-        }
         case armnn::DataType::Float16:
         {
             return std::make_unique<Float16Encoder>(static_cast<Half*>(data));
diff --git a/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.cpp b/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.cpp
deleted file mode 100644
index 2fe2eaf..0000000
--- a/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefConvertBf16ToFp32Workload.hpp"
-#include "RefWorkloadUtils.hpp"
-
-#include <armnnUtils/FloatingPointConverter.hpp>
-
-#include <BFloat16.hpp>
-
-namespace armnn
-{
-
-void RefConvertBf16ToFp32Workload::Execute() const
-{
-    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
-}
-
-void RefConvertBf16ToFp32Workload::ExecuteAsync(ExecutionData& executionData)
-{
-    WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
-    Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
-}
-
-void RefConvertBf16ToFp32Workload::Execute(std::vector<ITensorHandle*> inputs,
-                                           std::vector<ITensorHandle*> outputs) const
-{
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConvertBf16ToFp32Workload_Execute");
-
-    const BFloat16* const input = reinterpret_cast<const BFloat16*>(inputs[0]->Map());
-    float* const output = reinterpret_cast<float*>(outputs[0]->Map());
-
-    unsigned int numElements = GetTensorInfo(inputs[0]).GetNumElements();
-    armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(input, numElements, output);
-}
-
-} //namespace armnn
diff --git a/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.hpp b/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.hpp
deleted file mode 100644
index 24dcb0f..0000000
--- a/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.hpp
+++ /dev/null
@@ -1,24 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "RefBaseWorkload.hpp"
-#include <armnn/backends/WorkloadData.hpp>
-
-namespace armnn
-{
-
-class RefConvertBf16ToFp32Workload : public BFloat16ToFloat32Workload<ConvertBf16ToFp32QueueDescriptor>
-{
-public:
-    using BFloat16ToFloat32Workload<ConvertBf16ToFp32QueueDescriptor>::BFloat16ToFloat32Workload;
-    void Execute() const override;
-    void ExecuteAsync(ExecutionData& executionData)  override;
-private:
-    void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
-};
-
-} //namespace armnn
diff --git a/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.cpp b/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.cpp
deleted file mode 100644
index 71ee95b..0000000
--- a/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefConvertFp32ToBf16Workload.hpp"
-#include "RefWorkloadUtils.hpp"
-
-#include <armnnUtils/FloatingPointConverter.hpp>
-
-#include <BFloat16.hpp>
-
-namespace armnn
-{
-
-void RefConvertFp32ToBf16Workload::Execute() const
-{
-    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
-}
-
-void RefConvertFp32ToBf16Workload::ExecuteAsync(ExecutionData& executionData)
-{
-    WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
-    Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
-}
-
-void RefConvertFp32ToBf16Workload::Execute(std::vector<ITensorHandle*> inputs,
-                                           std::vector<ITensorHandle*> outputs) const
-{
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConvertFp32ToBf16Workload_Execute");
-
-    const float* const input = reinterpret_cast<const float*>(inputs[0]->Map());
-    BFloat16* const output = reinterpret_cast<BFloat16*>(outputs[0]->Map());
-
-    unsigned int numElements = GetTensorInfo(inputs[0]).GetNumElements();
-    armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(input, numElements, output);
-}
-
-} //namespace armnn
diff --git a/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.hpp b/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.hpp
deleted file mode 100644
index c1e57ec..0000000
--- a/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.hpp
+++ /dev/null
@@ -1,24 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "RefBaseWorkload.hpp"
-#include <armnn/backends/WorkloadData.hpp>
-
-namespace armnn
-{
-
-class RefConvertFp32ToBf16Workload : public Float32ToBFloat16Workload<ConvertFp32ToBf16QueueDescriptor>
-{
-public:
-    using Float32ToBFloat16Workload<ConvertFp32ToBf16QueueDescriptor>::Float32ToBFloat16Workload;
-    void Execute() const override;
-    void ExecuteAsync(ExecutionData& executionData)  override;
-private:
-    void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
-};
-
-} //namespace armnn
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index e049d8d..afed71b 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -17,9 +17,7 @@
 #include "RefConvolution3dWorkload.hpp"
 #include "RefConstantWorkload.hpp"
 #include "RefConcatWorkload.hpp"
-#include "RefConvertBf16ToFp32Workload.hpp"
 #include "RefConvertFp16ToFp32Workload.hpp"
-#include "RefConvertFp32ToBf16Workload.hpp"
 #include "RefConvertFp32ToFp16Workload.hpp"
 #include "RefDebugWorkload.hpp"
 #include "RefDepthToSpaceWorkload.hpp"
diff --git a/tests/ExecuteNetwork/ArmNNExecutor.cpp b/tests/ExecuteNetwork/ArmNNExecutor.cpp
index e8b5014..943d3aa 100644
--- a/tests/ExecuteNetwork/ArmNNExecutor.cpp
+++ b/tests/ExecuteNetwork/ArmNNExecutor.cpp
@@ -513,7 +513,6 @@
 
     armnn::OptimizerOptions options;
     options.m_ReduceFp32ToFp16 = m_Params.m_EnableFp16TurboMode;
-    options.m_ReduceFp32ToBf16 = m_Params.m_EnableBf16TurboMode;
     options.m_Debug = m_Params.m_PrintIntermediate;
     options.m_DebugToFile = m_Params.m_PrintIntermediateOutputsToFile;
     options.m_shapeInferenceMethod = m_Params.m_InferOutputShape ?
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
index 155a4c4..fa467c9 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
@@ -60,10 +60,9 @@
     }
     CheckClTuningParameter(m_TuningLevel, m_TuningPath, m_ComputeDevices);
 
-    if (m_EnableBf16TurboMode && m_EnableFp16TurboMode)
+    if (m_EnableBf16TurboMode && !m_EnableFastMath)
     {
-        throw armnn::InvalidArgumentException("BFloat16 and Float16 turbo mode cannot be "
-                                              "enabled at the same time.");
+        throw armnn::InvalidArgumentException("To use bf16-turbo-mode, also enable --enable-fast-math.");
     }
 
     // Check input tensor shapes
@@ -124,7 +123,6 @@
 
     armnn::OptimizerOptions options;
     options.m_ReduceFp32ToFp16 = m_EnableFp16TurboMode;
-    options.m_ReduceFp32ToBf16 = m_EnableBf16TurboMode;
     options.m_Debug = m_PrintIntermediate;
     options.m_DebugToFile = m_PrintIntermediateOutputsToFile;
     options.m_ProfilingEnabled = m_EnableProfiling;
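
With m_ReduceFp32ToBf16 removed from the optimizer options, bf16 execution is only reachable through a backend's fast-math support. The snippet below is a minimal sketch, not part of this patch: it assumes CpuAcc as the target backend and uses a hypothetical helper name (OptimizeWithFastMath) purely for illustration.

    #include <armnn/ArmNN.hpp>

    // Request fast math as a per-backend model option; there is no global
    // OptimizerOptions flag for it. Backends that support fast math may then
    // use bf16 kernels internally where profitable.
    armnn::IOptimizedNetworkPtr OptimizeWithFastMath(const armnn::INetwork& network,
                                                     armnn::IRuntime& runtime)
    {
        armnn::OptimizerOptions options;
        options.m_ReduceFp32ToFp16 = false;   // FP16 turbo mode is independent of fast math
        options.m_ModelOptions.push_back(
            armnn::BackendOptions("CpuAcc", {{"FastMathEnabled", true}}));

        return armnn::Optimize(network, {armnn::Compute::CpuAcc},
                               runtime.GetDeviceSpec(), options);
    }

On the ExecuteNetwork command line the equivalent is --enable-fast-math, which --bf16-turbo-mode now requires, as enforced by the check added above.
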
diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
index 5f19a14..e9d7750 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
@@ -375,14 +375,14 @@
 
         m_CxxOptions.add_options("c) Optimization")
                 ("bf16-turbo-mode",
-                 "If this option is enabled, FP32 layers, "
-                 "weights and biases will be converted to BFloat16 where the backend supports it",
+                 "This option is no longer being used. In order to use bf16 please set enable-fast-math "
+                 "to true",
                  cxxopts::value<bool>(m_ExNetParams.m_EnableBf16TurboMode)
                          ->default_value("false")->implicit_value("true"))
 
                 ("enable-fast-math",
                  "Enables fast_math options in backends that support it. Using the fast_math flag can lead to "
-                 "performance improvements but may result in reduced or different precision.",
+                 "performance improvements but may result in reduced or different precision. ",
                  cxxopts::value<bool>(m_ExNetParams.m_EnableFastMath)->default_value("false")->implicit_value("true"))
 
                 ("number-of-threads",
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index d837fc1..2806924 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -452,7 +452,6 @@
 
             armnn::OptimizerOptions options;
             options.m_ReduceFp32ToFp16 = params.m_EnableFp16TurboMode;
-            options.m_ReduceFp32ToBf16 = params.m_EnableBf16TurboMode;
             options.m_Debug = params.m_PrintIntermediateLayers;
             options.m_DebugToFile = params.m_PrintIntermediateLayersToFile;
             options.m_shapeInferenceMethod = params.m_InferOutputShape ?