IVGCVSW-7214 Disable BF16-Turbo-Mode and remove conversion layers

 - Remove Bf16ToFp32 Conversion Layer
 - Remove Fp32ToBf16 Conversion Layer
 - Remove BF16 Conversion tests
 * Throw exception if the m_ReduceFp32ToBf16 optimizer option is set to true
 * Add comments explaining that fast math must be enabled in order to use bf16
 * Update docs to inform users to enable fast math for bf16 (see the sketch after this list)
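
 A minimal sketch of the route the updated docs point users at, using the
 existing ArmNN BackendOptions API (variable names here are illustrative
 and not part of this patch):

     // Request fast math on the CpuAcc backend; with fast math enabled
     // the backend is free to use BF16 kernels internally, replacing the
     // old m_ReduceFp32ToBf16 reduction.
     armnn::OptimizerOptions options;
     armnn::BackendOptions cpuAcc("CpuAcc", {{"FastMathEnabled", true}});
     options.m_ModelOptions.push_back(cpuAcc);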

 Execute Network Changes
 * Require fast_math_enabled to also be set to true when bf16_turbo_mode is enabled (example invocation below)
 - Remove setting of the m_ReduceFp32ToBf16 optimizer option
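
 For illustration, an ExecuteNetwork invocation would now look roughly like
 the following (the bf16 flag name is assumed from m_EnableBf16TurboMode;
 only --enable-fast-math appears in the diff below):

     ExecuteNetwork --bf16-turbo-mode --enable-fast-math ...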

Signed-off-by: Ryan OShea <ryan.oshea3@arm.com>
Change-Id: Ibaa6da9d29c96a1ce32ff5196b0847fde9f04a1c
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
index 155a4c4..fa467c9 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
@@ -60,10 +60,9 @@
     }
     CheckClTuningParameter(m_TuningLevel, m_TuningPath, m_ComputeDevices);
 
-    if (m_EnableBf16TurboMode && m_EnableFp16TurboMode)
+    if (m_EnableBf16TurboMode && !m_EnableFastMath)
     {
-        throw armnn::InvalidArgumentException("BFloat16 and Float16 turbo mode cannot be "
-                                              "enabled at the same time.");
+        throw armnn::InvalidArgumentException("To use BF16, please also pass --enable-fast-math.");
     }
 
     // Check input tensor shapes
@@ -124,7 +123,6 @@
 
     armnn::OptimizerOptions options;
     options.m_ReduceFp32ToFp16 = m_EnableFp16TurboMode;
-    options.m_ReduceFp32ToBf16 = m_EnableBf16TurboMode;
     options.m_Debug = m_PrintIntermediate;
     options.m_DebugToFile = m_PrintIntermediateOutputsToFile;
     options.m_ProfilingEnabled = m_EnableProfiling;
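
For context, a minimal sketch of where these options end up (the network and
runtime variables are assumed surrounding code, not part of this patch):

    // The populated OptimizerOptions flow into graph optimization, so
    // removing the m_ReduceFp32ToBf16 assignment above is sufficient to
    // stop requesting the BF16 reduction. 'network' and 'runtime' are
    // assumed to exist in the enclosing ExecuteNetwork code.
    std::vector<armnn::BackendId> backends(m_ComputeDevices.begin(),
                                           m_ComputeDevices.end());
    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(*network, backends, runtime->GetDeviceSpec(), options);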