COMPMID-970: Remove QS8/QS16 support

Removed fixed-point related code.
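
In the validation tests this amounts to two mechanical changes: branches
guarded by is_data_type_fixed_point() collapse to their floating-point arm,
and every TensorInfo call site drops the trailing fixed-point-position
argument. Illustrative sketch only (the meaning of the trailing integer is
inferred from the call sites below):

    TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 2); // before: last argument set the fixed-point position
    TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32);    // after: three-argument form only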

Change-Id: I487acf138dace3b0450e0d72ca7071eaec254566
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/137678
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp
index 4f97d7b..45b23ed 100644
--- a/tests/validation/CL/ActivationLayer.cpp
+++ b/tests/validation/CL/ActivationLayer.cpp
@@ -61,35 +61,14 @@
         case ActivationLayerInfo::ActivationFunction::SQUARE:
             return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.1f : epsilon);
         case ActivationLayerInfo::ActivationFunction::LOGISTIC:
-            if(is_data_type_fixed_point(data_type))
-            {
-                return AbsoluteTolerance<float>(5.f);
-            }
-            else
-            {
-                return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.001f : epsilon);
-            }
+            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.001f : epsilon);
         case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
             return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.00001f : epsilon);
         case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
         case ActivationLayerInfo::ActivationFunction::SQRT:
-            if(is_data_type_fixed_point(data_type))
-            {
-                return AbsoluteTolerance<float>(5.f);
-            }
-            else
-            {
-                return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.01f : 0.00001f);
-            }
+            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.01f : 0.00001f);
         case ActivationLayerInfo::ActivationFunction::TANH:
-            if(is_data_type_fixed_point(data_type))
-            {
-                return AbsoluteTolerance<float>(5.f);
-            }
-            else
-            {
-                return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.001f : 0.00001f);
-            }
+            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.001f : 0.00001f);
         default:
             return AbsoluteTolerance<float>(epsilon);
     }
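
With the fixed-point branches gone, the tolerance helper in this file reduces
to a single switch over the activation function. A minimal sketch of the
resulting function, assuming the helper's name, signature, and default epsilon
(only the case bodies are taken verbatim from the hunk above):

    // Hypothetical reconstruction of the post-patch tolerance helper.
    AbsoluteTolerance<float> tolerance(ActivationLayerInfo::ActivationFunction activation, DataType data_type)
    {
        constexpr float epsilon = 1e-6f; // assumed default; the real constant is defined elsewhere in the file
        switch(activation)
        {
            case ActivationLayerInfo::ActivationFunction::SQUARE:
                return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.1f : epsilon);
            case ActivationLayerInfo::ActivationFunction::LOGISTIC:
                return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.001f : epsilon);
            case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
                return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.00001f : epsilon);
            case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
            case ActivationLayerInfo::ActivationFunction::SQRT:
                return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.01f : 0.00001f);
            case ActivationLayerInfo::ActivationFunction::TANH:
                return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.001f : 0.00001f);
            default:
                return AbsoluteTolerance<float>(epsilon);
        }
    }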
diff --git a/tests/validation/CL/ArithmeticDivision.cpp b/tests/validation/CL/ArithmeticDivision.cpp
index 42e2d22..5d4fa1f 100644
--- a/tests/validation/CL/ArithmeticDivision.cpp
+++ b/tests/validation/CL/ArithmeticDivision.cpp
@@ -57,19 +57,19 @@
                                                         TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),      // Window shrink
                                                         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),      // Invalid data type combination
                                                         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),     // Mismatching shapes
-                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 2),
+                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                       }),
                framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
                                                        TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
-                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 2),
+                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                      })),
                framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
                                                        TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
-                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 2),
+                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                      })),
                framework::dataset::make("Expected", { false, false, false, false, true })),
                input1_info, input2_info, output_info, expected)
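
Each row of the zipped datasets above drives one call to the layer's static
validate() function. A minimal sketch of the per-row check, assuming the usual
ACL validate-test idiom (the exact body in the file may differ):

    // Hypothetical per-row body of the DATA_TEST_CASE above.
    const Status s = CLArithmeticDivision::validate(&input1_info.clone()->set_is_resizable(false),
                                                    &input2_info.clone()->set_is_resizable(false),
                                                    &output_info.clone()->set_is_resizable(false));
    ARM_COMPUTE_EXPECT(bool(s) == expected, framework::LogLevel::ERRORS);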
diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp
index 30dd850..4ea2eb8 100644
--- a/tests/validation/CL/ConvolutionLayer.cpp
+++ b/tests/validation/CL/ConvolutionLayer.cpp
@@ -71,32 +71,32 @@
 TEST_SUITE(ConvolutionLayer)
 
 DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
-                                                                                                   framework::dataset::make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32, 0),
-                                                                                                           TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32, 0),
-                                                                                                           TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32, 0),
-                                                                                                           TensorInfo(TensorShape(23U, 27U, 31U, 4U), 1, DataType::F32, 0),
-                                                                                                           TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32, 0),
-                                                                                                           TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32, 0),
-                                                                                                           TensorInfo(TensorShape(17U, 31U, 32U), 1, DataType::F32, 0),
-                                                                                                           TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32, 0)
+                                                                                                   framework::dataset::make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32),
+                                                                                                           TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32),
+                                                                                                           TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32),
+                                                                                                           TensorInfo(TensorShape(23U, 27U, 31U, 4U), 1, DataType::F32),
+                                                                                                           TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32),
+                                                                                                           TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32),
+                                                                                                           TensorInfo(TensorShape(17U, 31U, 32U), 1, DataType::F32),
+                                                                                                           TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32)
                                                                                                                                          }),
-                                                                                                   framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32, 0),
-                                                                                                           TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32, 0),
-                                                                                                           TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0),
-                                                                                                           TensorInfo(TensorShape(3U, 3U, 31U, 21U), 1, DataType::F32, 0),
-                                                                                                           TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0),
-                                                                                                           TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16, 0),
-                                                                                                           TensorInfo(TensorShape(5U, 5U, 32U, 19U), 1, DataType::F32, 0),
-                                                                                                           TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32, 0)
+                                                                                                   framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
+                                                                                                           TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
+                                                                                                           TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
+                                                                                                           TensorInfo(TensorShape(3U, 3U, 31U, 21U), 1, DataType::F32),
+                                                                                                           TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
+                                                                                                           TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16),
+                                                                                                           TensorInfo(TensorShape(5U, 5U, 32U, 19U), 1, DataType::F32),
+                                                                                                           TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32)
                                                                                                                                            })),
-                                                                                               framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32, 0),
-                                                                                                                        TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32, 0),
-                                                                                                                        TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32, 0),
-                                                                                                                        TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32, 0),
-                                                                                                                        TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0),
-                                                                                                                        TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32, 0),
-                                                                                                                        TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32, 0),
-                                                                                                                        TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32, 0)
+                                                                                               framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
+                                                                                                                        TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
+                                                                                                                        TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32),
+                                                                                                                        TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32),
+                                                                                                                        TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
+                                                                                                                        TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32),
+                                                                                                                        TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32),
+                                                                                                                        TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32)
                                                                                                                                       })),
                                                                                            framework::dataset::make("ConvInfo", { PadStrideInfo(1, 2, 1, 1),
                                                                                                                     PadStrideInfo(1, 2, 1, 1),
diff --git a/tests/validation/CL/DeconvolutionLayer.cpp b/tests/validation/CL/DeconvolutionLayer.cpp
index 269bf15..0fd7ed4 100644
--- a/tests/validation/CL/DeconvolutionLayer.cpp
+++ b/tests/validation/CL/DeconvolutionLayer.cpp
@@ -103,33 +103,33 @@
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
-    framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),   // Mismatching data type
-                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),   // Invalid weights shape
-                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8, 4),   // Non supported data type
-                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 11),  // Invalid bias shape
-                                            TensorInfo(TensorShape(13U, 11U, 4U, 3U), 1, DataType::F32, 0), // Window shrink
-                                            TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32, 0),
+    framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),   // Mismatching data type
+                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),   // Invalid weights shape
+                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8),   // Non supported data type
+                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),  // Invalid bias shape
+                                            TensorInfo(TensorShape(13U, 11U, 4U, 3U), 1, DataType::F32), // Window shrink
+                                            TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32),
                                           }),
-    framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16, 0),
-                                            TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0),
-                                            TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::QASYMM8, 5),
-                                            TensorInfo(TensorShape(3U, 2U, 2U, 2U), 1, DataType::F32, 11),
-                                            TensorInfo(TensorShape(3U, 3U, 4U), 1, DataType::F32, 0),
-                                              TensorInfo(TensorShape(1U, 1U, 2U, 4U), 1, DataType::F32, 0),
+    framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16),
+                                            TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::QASYMM8),
+                                            TensorInfo(TensorShape(3U, 2U, 2U, 2U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(3U, 3U, 4U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(1U, 1U, 2U, 4U), 1, DataType::F32),
                                           })),
-    framework::dataset::make("BiasInfo",  { TensorInfo(TensorShape(1U), 1, DataType::F16, 0),
-                                            TensorInfo(TensorShape(1U), 1, DataType::F32, 0),
-                                            TensorInfo(TensorShape(1U), 1, DataType::F32, 5),
-                                            TensorInfo(TensorShape(25U, 11U), 1, DataType::F32, 11),
-                                            TensorInfo(TensorShape(1U), 1, DataType::F32, 0),
-                                            TensorInfo(TensorShape(4U), 1, DataType::F32, 0),
+    framework::dataset::make("BiasInfo",  { TensorInfo(TensorShape(1U), 1, DataType::F16),
+                                            TensorInfo(TensorShape(1U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(1U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(25U, 11U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(1U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(4U), 1, DataType::F32),
                                           })),
-    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16, 0),
-                                            TensorInfo(TensorShape(25U, 10U, 2U), 1, DataType::F32, 0),
-                                            TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 5),
-                                            TensorInfo(TensorShape(13U, 13U, 2U), 1, DataType::F32, 0),
-                                            TensorInfo(TensorShape(11U, 9U, 1U, 3U), 1, DataType::F32, 0),
-                                            TensorInfo(TensorShape(32U, 16U, 4U), 1, DataType::F32, 0),
+    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16),
+                                            TensorInfo(TensorShape(25U, 10U, 2U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(13U, 13U, 2U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(11U, 9U, 1U, 3U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(32U, 16U, 4U), 1, DataType::F32),
                                           })),
     framework::dataset::make("PadStrideInfo", { PadStrideInfo(1, 1, 0, 0),
                                                 PadStrideInfo(1, 1, 0, 0),
diff --git a/tests/validation/CL/DepthwiseConvolutionLayer.cpp b/tests/validation/CL/DepthwiseConvolutionLayer.cpp
index 5b18f59..fad8140 100644
--- a/tests/validation/CL/DepthwiseConvolutionLayer.cpp
+++ b/tests/validation/CL/DepthwiseConvolutionLayer.cpp
@@ -56,57 +56,57 @@
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate3x3, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
-               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32, 0),     // Mismatching data type input/weights
-                                                       TensorInfo(TensorShape(32U, 18U, 3U), 1, DataType::F32, 0),     // Mismatching input feature maps
-                                                       TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32, 0),     // Unsupported weights dimensions
-                                                       TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::QASYMM8, 0), // Unsupported activation
-                                                       TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32, 0),     // Mismatching depth multiplier
-                                                       TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32, 0),     // Invalid stride
-                                                       TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32, 0),     // Invalid biases size
-                                                       TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32, 0),     // Invalid biases dimensions
-                                                       TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32, 0),     // Invalid output size
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),     // Window shrink
-                                                       TensorInfo(TensorShape(32U, 18U, 8U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(50U, 32U, 8U), 1, DataType::QASYMM8, 0),
+               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32),     // Mismatching data type input/weights
+                                                       TensorInfo(TensorShape(32U, 18U, 3U), 1, DataType::F32),     // Mismatching input feature maps
+                                                       TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32),     // Unsupported weights dimensions
+                                                       TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::QASYMM8), // Unsupported activation
+                                                       TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32),     // Mismatching depth multiplier
+                                                       TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32),     // Invalid stride
+                                                       TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32),     // Invalid biases size
+                                                       TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32),     // Invalid biases dimensions
+                                                       TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32),     // Invalid output size
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Window shrink
+                                                       TensorInfo(TensorShape(32U, 18U, 8U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(50U, 32U, 8U), 1, DataType::QASYMM8),
                                                      }),
-               framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F16, 0),
-                                                         TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(5U, 5U, 2U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::QASYMM8, 0),
-                                                         TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(3U, 3U, 16U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(3U, 3U, 24U), 1, DataType::QASYMM8, 0),
+               framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F16),
+                                                         TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(5U, 5U, 2U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::QASYMM8),
+                                                         TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(3U, 3U, 16U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(3U, 3U, 24U), 1, DataType::QASYMM8),
                                                        })),
-               framework::dataset::make("BiasesInfo", { TensorInfo(TensorShape(2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(2U), 1, DataType::S32, 0),
-                                                        TensorInfo(TensorShape(2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(4U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(2U, 2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(16U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(24U), 1, DataType::S32, 0),
+               framework::dataset::make("BiasesInfo", { TensorInfo(TensorShape(2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(2U), 1, DataType::S32),
+                                                        TensorInfo(TensorShape(2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(4U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(2U, 2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(16U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(24U), 1, DataType::S32),
                                                       })),
-               framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::QASYMM8, 0),
-                                                        TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(30U, 16U, 16U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(48U, 30U, 24U), 1, DataType::QASYMM8, 0),
+               framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::QASYMM8),
+                                                        TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(30U, 16U, 16U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(48U, 30U, 24U), 1, DataType::QASYMM8),
                                                       })),
                framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0),
                                                       PadStrideInfo(1, 1, 0, 0),
@@ -155,41 +155,41 @@
 }
 
 DATA_TEST_CASE(ValidateGeneric, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
-                framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),    // Mismatching data type input/weights
-                                                        TensorInfo(TensorShape(27U, 13U, 3U), 1, DataType::F32, 0),    // Mismatching input feature maps
-                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),    // Mismatching depth multiplier
-                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),    // Invalid biases size
-                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),    // Invalid biases dimensions
-                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),    // Invalid output size
-                                                        TensorInfo(TensorShape(27U, 13U, 8U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(32U, 13U, 8U), 1, DataType::QASYMM8, 0),
+                framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),    // Mismatching data type input/weights
+                                                        TensorInfo(TensorShape(27U, 13U, 3U), 1, DataType::F32),    // Mismatching input feature maps
+                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),    // Mismatching depth multiplier
+                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),    // Invalid biases size
+                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),    // Invalid biases dimensions
+                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),    // Invalid output size
+                                                        TensorInfo(TensorShape(27U, 13U, 8U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(32U, 13U, 8U), 1, DataType::QASYMM8),
                                                       }),
-                framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F16, 0),
-                                                          TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 0),
-                                                          TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 0),
-                                                          TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 0),
-                                                          TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 0),
-                                                          TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 0),
-                                                          TensorInfo(TensorShape(3U, 3U, 16U), 1, DataType::F32, 0),
-                                                          TensorInfo(TensorShape(3U, 3U, 24U), 1, DataType::QASYMM8, 0),
+                framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F16),
+                                                          TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32),
+                                                          TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32),
+                                                          TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32),
+                                                          TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32),
+                                                          TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32),
+                                                          TensorInfo(TensorShape(3U, 3U, 16U), 1, DataType::F32),
+                                                          TensorInfo(TensorShape(3U, 3U, 24U), 1, DataType::QASYMM8),
                                                         })),
-                framework::dataset::make("BiasesInfo", { TensorInfo(TensorShape(2U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(2U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(2U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(4U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(2U, 2U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(2U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(16U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(24U), 1, DataType::S32, 0),
+                framework::dataset::make("BiasesInfo", { TensorInfo(TensorShape(2U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(2U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(2U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(4U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(2U, 2U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(2U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(16U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(24U), 1, DataType::S32),
                                                        })),
-                framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(25U, 11U, 16U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(32U, 11U, 24U), 1, DataType::QASYMM8, 0),
+                framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(25U, 11U, 16U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(32U, 11U, 24U), 1, DataType::QASYMM8),
                                                        })),
                 framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0),
                                                        PadStrideInfo(1, 1, 0, 0),
diff --git a/tests/validation/CL/DilatedConvolutionLayer.cpp b/tests/validation/CL/DilatedConvolutionLayer.cpp
index fdd6cc8..f748f90 100644
--- a/tests/validation/CL/DilatedConvolutionLayer.cpp
+++ b/tests/validation/CL/DilatedConvolutionLayer.cpp
@@ -61,23 +61,23 @@
 TEST_SUITE(DilatedConvolutionLayer)
 
 DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
-                                                                                               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32, 0),
-                                                                                                                        TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32, 0),
-                                                                                                                        TensorInfo(TensorShape(23U, 27U, 23U, 4U), 1, DataType::F32, 0),
-                                                                                                                        TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32, 0),
-                                                                                                                        TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32, 0)
+                                                                                               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32),
+                                                                                                                        TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32),
+                                                                                                                        TensorInfo(TensorShape(23U, 27U, 23U, 4U), 1, DataType::F32),
+                                                                                                                        TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32),
+                                                                                                                        TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32)
                                                                                                                                      }),
-                                                                                               framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32, 0),
-                                                                                                                        TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32, 0),
-                                                                                                                        TensorInfo(TensorShape(3U, 3U, 23U, 21U), 1, DataType::F32, 0),
-                                                                                                                        TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0),
-                                                                                                                        TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16, 0)
+                                                                                               framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
+                                                                                                                        TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
+                                                                                                                        TensorInfo(TensorShape(3U, 3U, 23U, 21U), 1, DataType::F32),
+                                                                                                                        TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
+                                                                                                                        TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16)
                                                                                                                                        })),
-                                                                                           framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32, 0),
-                                                                                                                    TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32, 0),
-                                                                                                                    TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32, 0),
-                                                                                                                    TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0),
-                                                                                                                    TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32, 0)
+                                                                                           framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
+                                                                                                                    TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
+                                                                                                                    TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32),
+                                                                                                                    TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
+                                                                                                                    TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32)
                                                                                                                                   })),
                                                                                        framework::dataset::make("ConvInfo", { PadStrideInfo(1, 2, 1, 1),
                                                                                                                 PadStrideInfo(1, 2, 1, 1),
diff --git a/tests/validation/CL/DirectConvolutionLayer.cpp b/tests/validation/CL/DirectConvolutionLayer.cpp
index a796b6e..87f9449 100644
--- a/tests/validation/CL/DirectConvolutionLayer.cpp
+++ b/tests/validation/CL/DirectConvolutionLayer.cpp
@@ -61,16 +61,7 @@
                                                                  combine(framework::dataset::make("PadY", 0, 2),
                                                                          framework::dataset::make("KernelSize", { 3, 5 })))),
                                                   framework::dataset::make("NumKernels", { 1, 4, 8, 16 })))));
-const auto data_fixed_point = combine(datasets::TinyDirectConvolutionShapes(),
-                                      combine(framework::dataset::make("StrideX", 1, 3),
-                                              combine(framework::dataset::make("StrideY", 1, 3),
-                                                      combine(concat(combine(framework::dataset::make("PadX", 0),
-                                                                             combine(framework::dataset::make("PadY", 0),
-                                                                                     framework::dataset::make("KernelSize", 1))),
-                                                                     combine(framework::dataset::make("PadX", 0, 2),
-                                                                             combine(framework::dataset::make("PadY", 0, 2),
-                                                                                     framework::dataset::make("KernelSize", { 3 })))),
-                                                              framework::dataset::make("NumKernels", { 1, 4, 8, 16 })))));
+
 /** Activation function Dataset*/
 const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
 {
@@ -89,53 +80,53 @@
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
-               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type input/weights
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching input feature maps
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Unsupported kernel width
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Non-rectangular weights dimensions
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid weights dimensions
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid stride
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid biases size
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid biases dimensions
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid output size
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink
-                                                       TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32, 0),
+               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type input/weights
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching input feature maps
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Unsupported kernel width
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Non-rectangular weights dimensions
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid weights dimensions
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid stride
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid biases size
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid biases dimensions
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid output size
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Window shrink
+                                                       TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32),
                                                      }),
-               framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F16, 0),
-                                                        TensorInfo(TensorShape(3U, 3U, 3U, 4U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(9U, 9U, 2U, 4U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(5U, 3U, 2U, 4U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U, 3U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(1U, 1U, 2U, 4U), 1, DataType::F32, 0),
+               framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F16),
+                                                        TensorInfo(TensorShape(3U, 3U, 3U, 4U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(9U, 9U, 2U, 4U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(5U, 3U, 2U, 4U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U, 3U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(1U, 1U, 2U, 4U), 1, DataType::F32),
                                                      })),
-               framework::dataset::make("BiasesInfo",{ TensorInfo(TensorShape(4U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(4U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(4U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(4U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(4U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(4U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(3U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(4U, 2U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(4U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(4U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(4U), 1, DataType::F32, 0),
+               framework::dataset::make("BiasesInfo",{ TensorInfo(TensorShape(4U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(3U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(4U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(4U), 1, DataType::F32),
                                                      })),
-               framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(26U, 11U, 4U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(32U, 16U, 4U), 1, DataType::F32, 0),
+               framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(26U, 11U, 4U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 16U, 4U), 1, DataType::F32),
                                                      })),
                framework::dataset::make("ConvInfo",  { PadStrideInfo(1, 1, 0, 0),
                                                        PadStrideInfo(1, 1, 0, 0),
diff --git a/tests/validation/CL/LSTMLayer.cpp b/tests/validation/CL/LSTMLayer.cpp
index bd43678..e1d4cbe 100644
--- a/tests/validation/CL/LSTMLayer.cpp
+++ b/tests/validation/CL/LSTMLayer.cpp
@@ -49,77 +49,77 @@
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(zip(zip(
-               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(8U, 2U), 1, DataType::U8, 0),      // Wrong data type
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Wrong input size
-                                                       TensorInfo(TensorShape(8U, 2U), 1, DataType::F32, 0),     // Wrong input weights size
-                                                       TensorInfo(TensorShape(8U, 2U), 1, DataType::F32, 0),     // Wrong recurrent weights size
-                                                       TensorInfo(TensorShape(8U, 2U), 1, DataType::F32, 0),     // Wrong cell bias size
-                                                       TensorInfo(TensorShape(8U, 2U), 1, DataType::F32, 0),     // Wrong cell state size
-                                                       TensorInfo(TensorShape(8U, 2U), 1, DataType::F32, 0),     // Wrong output size
-                                                       TensorInfo(TensorShape(8U, 2U), 1, DataType::F32, 0),     // Wrong scratch size
+               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(8U, 2U), 1, DataType::U8),      // Wrong data type
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Wrong input size
+                                                       TensorInfo(TensorShape(8U, 2U), 1, DataType::F32),     // Wrong input weights size
+                                                       TensorInfo(TensorShape(8U, 2U), 1, DataType::F32),     // Wrong recurrent weights size
+                                                       TensorInfo(TensorShape(8U, 2U), 1, DataType::F32),     // Wrong cell bias size
+                                                       TensorInfo(TensorShape(8U, 2U), 1, DataType::F32),     // Wrong cell state size
+                                                       TensorInfo(TensorShape(8U, 2U), 1, DataType::F32),     // Wrong output size
+                                                       TensorInfo(TensorShape(8U, 2U), 1, DataType::F32),     // Wrong scratch size
                }),
-               framework::dataset::make("InputWeightsInfo", { TensorInfo(TensorShape(8U, 16U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(8U, 16U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(8U, 16U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(8U, 16U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(8U, 16U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(8U, 16U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(8U, 16U), 1, DataType::F32, 0),
+               framework::dataset::make("InputWeightsInfo", { TensorInfo(TensorShape(8U, 16U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(8U, 16U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(8U, 16U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(8U, 16U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(8U, 16U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(8U, 16U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(8U, 16U), 1, DataType::F32),
                })),
-               framework::dataset::make("RecurrentWeightsInfo", { TensorInfo(TensorShape(16U, 16U), 1, DataType::F32, 0),
-                                                                  TensorInfo(TensorShape(16U, 16U), 1, DataType::F32, 0),
-                                                                  TensorInfo(TensorShape(16U, 16U), 1, DataType::F32, 0),
-                                                                  TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0),
-                                                                  TensorInfo(TensorShape(16U, 16U), 1, DataType::F32, 0),
-                                                                  TensorInfo(TensorShape(16U, 16U), 1, DataType::F32, 0),
-                                                                  TensorInfo(TensorShape(16U, 16U), 1, DataType::F32, 0),
-                                                                  TensorInfo(TensorShape(16U, 16U), 1, DataType::F32, 0),
+               framework::dataset::make("RecurrentWeightsInfo", { TensorInfo(TensorShape(16U, 16U), 1, DataType::F32),
+                                                                  TensorInfo(TensorShape(16U, 16U), 1, DataType::F32),
+                                                                  TensorInfo(TensorShape(16U, 16U), 1, DataType::F32),
+                                                                  TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32),
+                                                                  TensorInfo(TensorShape(16U, 16U), 1, DataType::F32),
+                                                                  TensorInfo(TensorShape(16U, 16U), 1, DataType::F32),
+                                                                  TensorInfo(TensorShape(16U, 16U), 1, DataType::F32),
+                                                                  TensorInfo(TensorShape(16U, 16U), 1, DataType::F32),
                })),
-               framework::dataset::make("CellBiasInfo", { TensorInfo(TensorShape(16U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(16U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(16U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(16U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(30U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(16U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(16U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(16U), 1, DataType::F32, 0),
+               framework::dataset::make("CellBiasInfo", { TensorInfo(TensorShape(16U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(16U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(16U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(16U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(30U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(16U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(16U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(16U), 1, DataType::F32),
                })),
-               framework::dataset::make("ProjectionBiasInfo", { TensorInfo(TensorShape(16U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(16U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(16U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(16U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(16U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(16U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(16U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(16U), 1, DataType::F32, 0),
+               framework::dataset::make("ProjectionBiasInfo", { TensorInfo(TensorShape(16U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(16U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(16U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(16U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(16U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(16U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(16U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(16U), 1, DataType::F32),
                })),
-               framework::dataset::make("CellStateInfo", { TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(11U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0),
+               framework::dataset::make("CellStateInfo", { TensorInfo(TensorShape(16U, 2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(11U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32),
                })),
-               framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0),
+               framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(16U, 2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(16U, 2U), 1, DataType::F32),
                })),
-               framework::dataset::make("ScratchInfo", { TensorInfo(TensorShape(64U, 2U), 1, DataType::F32, 0),
-                                                             TensorInfo(TensorShape(64U, 2U), 1, DataType::F32, 0),
-                                                             TensorInfo(TensorShape(64U, 2U), 1, DataType::F32, 0),
-                                                             TensorInfo(TensorShape(64U, 2U), 1, DataType::F32, 0),
-                                                             TensorInfo(TensorShape(64U, 2U), 1, DataType::F32, 0),
-                                                             TensorInfo(TensorShape(64U, 2U), 1, DataType::F32, 0),
-                                                             TensorInfo(TensorShape(64U, 2U), 1, DataType::F32, 0),
-                                                             TensorInfo(TensorShape(12U, 2U), 1, DataType::F32, 0),
+               framework::dataset::make("ScratchInfo", { TensorInfo(TensorShape(64U, 2U), 1, DataType::F32),
+                                                             TensorInfo(TensorShape(64U, 2U), 1, DataType::F32),
+                                                             TensorInfo(TensorShape(64U, 2U), 1, DataType::F32),
+                                                             TensorInfo(TensorShape(64U, 2U), 1, DataType::F32),
+                                                             TensorInfo(TensorShape(64U, 2U), 1, DataType::F32),
+                                                             TensorInfo(TensorShape(64U, 2U), 1, DataType::F32),
+                                                             TensorInfo(TensorShape(64U, 2U), 1, DataType::F32),
+                                                             TensorInfo(TensorShape(12U, 2U), 1, DataType::F32),
                })),
                framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                                                             ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
diff --git a/tests/validation/CL/LocallyConnected.cpp b/tests/validation/CL/LocallyConnected.cpp
index d8f236c..5381072 100644
--- a/tests/validation/CL/LocallyConnected.cpp
+++ b/tests/validation/CL/LocallyConnected.cpp
@@ -52,41 +52,41 @@
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
-    framework::dataset::make("InputInfo",  { TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching data type input/weights
-                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching data type input/bias
-                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching data type input/output
-                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching shape input/weights
-                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching shape input/bias
-                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching shape input/output
-                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Asymmetric padding
-                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0)
+    framework::dataset::make("InputInfo",  { TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching data type input/weights
+                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching data type input/bias
+                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching data type input/output
+                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching shape input/weights
+                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching shape input/bias
+                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching shape input/output
+                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Asymmetric padding
+                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32)
                                            }),
-    framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F16, 0),
-                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 274U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32, 0)
+    framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F16),
+                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 274U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32)
                                            })),
-    framework::dataset::make("BiasInfo",   { TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F16, 0),
-                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(21U, 274U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0)
+    framework::dataset::make("BiasInfo",   { TensorInfo(TensorShape(21U, 275U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F16),
+                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(21U, 274U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F32)
                                            })),
-    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F16, 0),
-                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(11U, 25U, 22U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0)
+    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F16),
+                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(11U, 25U, 22U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32)
                                            })),
     framework::dataset::make("PadStride",  { PadStrideInfo(2, 1, 0, 0),
                                              PadStrideInfo(2, 1, 0, 0),
diff --git a/tests/validation/CL/NormalizationLayer.cpp b/tests/validation/CL/NormalizationLayer.cpp
index a2dbaff..e640e01 100644
--- a/tests/validation/CL/NormalizationLayer.cpp
+++ b/tests/validation/CL/NormalizationLayer.cpp
@@ -71,19 +71,19 @@
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
-               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type input/output
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching shapes
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Even normalization
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Not implemented IN_MAP_2D
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink
-                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 0),
+               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type input/output
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Even normalization
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Not implemented IN_MAP_2D
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Window shrink
+                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                      }),
-               framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16, 0),
-                                                       TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 0),
+               framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
+                                                       TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                      })),
                framework::dataset::make("NormInfo",  { NormalizationLayerInfo(NormType::IN_MAP_1D, 5),
                                                        NormalizationLayerInfo(NormType::IN_MAP_1D, 5),
diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp
index 0b8a11f..1331522 100644
--- a/tests/validation/CL/PoolingLayer.cpp
+++ b/tests/validation/CL/PoolingLayer.cpp
@@ -65,23 +65,23 @@
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
-               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),     // Mismatching data type
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),     // Window shrink
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),     // Invalid pad/size combination
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),     // Invalid pad/size combination
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8, 0), // Invalid parameters
-                                                       TensorInfo(TensorShape(15U, 13U, 5U), 1, DataType::F32, 0),     // Non-rectangular Global Pooling
-                                                       TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32, 0),     // Invalid output Global Pooling
-                                                       TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32, 0),
+               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Mismatching data type
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Window shrink
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Invalid pad/size combination
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Invalid pad/size combination
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), // Invalid parameters
+                                                       TensorInfo(TensorShape(15U, 13U, 5U), 1, DataType::F32),     // Non-rectangular Global Pooling
+                                                       TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32),     // Invalid output Global Pooling
+                                                       TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32),
                                                      }),
-               framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16, 0),
-                                                       TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(25U, 16U, 2U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8, 0),
-                                                       TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(2U, 2U, 5U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32, 0),
+               framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16),
+                                                       TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(25U, 16U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8),
+                                                       TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(2U, 2U, 5U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32),
                                                      })),
                framework::dataset::make("PoolInfo",  { PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)),
                                                        PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)),
diff --git a/tests/validation/CL/RNNLayer.cpp b/tests/validation/CL/RNNLayer.cpp
index 0af6f8e..9179c09 100644
--- a/tests/validation/CL/RNNLayer.cpp
+++ b/tests/validation/CL/RNNLayer.cpp
@@ -49,53 +49,53 @@
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
-               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U), 1, DataType::U8, 0),      // Wrong data type
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Wrong input size
-                                                       TensorInfo(TensorShape(27U, 13U), 1, DataType::F32, 0),     // Wrong weights size
-                                                       TensorInfo(TensorShape(27U, 13U), 1, DataType::F32, 0),     // Wrong recurrent weights size
-                                                       TensorInfo(TensorShape(27U, 13U), 1, DataType::F32, 0),     // Wrong bias size
-                                                       TensorInfo(TensorShape(27U, 13U), 1, DataType::F32, 0),     // Wrong output size
-                                                       TensorInfo(TensorShape(27U, 13U), 1, DataType::F32, 0),     // Wrong hidden output size
+               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U), 1, DataType::U8),      // Wrong data type
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Wrong input size
+                                                       TensorInfo(TensorShape(27U, 13U), 1, DataType::F32),     // Wrong weights size
+                                                       TensorInfo(TensorShape(27U, 13U), 1, DataType::F32),     // Wrong recurrent weights size
+                                                       TensorInfo(TensorShape(27U, 13U), 1, DataType::F32),     // Wrong bias size
+                                                       TensorInfo(TensorShape(27U, 13U), 1, DataType::F32),     // Wrong output size
+                                                       TensorInfo(TensorShape(27U, 13U), 1, DataType::F32),     // Wrong hidden output size
                }),
-               framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0),
+               framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(27U, 11U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 11U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 11U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 11U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 11U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 11U), 1, DataType::F32),
                })),
-               framework::dataset::make("RecurrentWeightsInfo", { TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0),
-                                                                  TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0),
-                                                                  TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0),
-                                                                  TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0),
-                                                                  TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0),
-                                                                  TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0),
-                                                                  TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0),
+               framework::dataset::make("RecurrentWeightsInfo", { TensorInfo(TensorShape(11U, 11U), 1, DataType::F32),
+                                                                  TensorInfo(TensorShape(11U, 11U), 1, DataType::F32),
+                                                                  TensorInfo(TensorShape(11U, 11U), 1, DataType::F32),
+                                                                  TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32),
+                                                                  TensorInfo(TensorShape(11U, 11U), 1, DataType::F32),
+                                                                  TensorInfo(TensorShape(11U, 11U), 1, DataType::F32),
+                                                                  TensorInfo(TensorShape(11U, 11U), 1, DataType::F32),
                })),
-               framework::dataset::make("BiasInfo", { TensorInfo(TensorShape(11U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(11U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(11U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(11U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(30U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(11U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(11U), 1, DataType::F32, 0),
+               framework::dataset::make("BiasInfo", { TensorInfo(TensorShape(11U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(11U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(11U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(11U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(30U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(11U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(11U), 1, DataType::F32),
                })),
-               framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(11U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
+               framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(11U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
                })),
-               framework::dataset::make("HiddenStateInfo", { TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                             TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                             TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                             TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                             TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                             TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                             TensorInfo(TensorShape(11U, 13U, 2U), 1, DataType::F32, 0),
+               framework::dataset::make("HiddenStateInfo", { TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                             TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                             TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                             TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                             TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                             TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                             TensorInfo(TensorShape(11U, 13U, 2U), 1, DataType::F32),
                })),
                framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                                                             ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
diff --git a/tests/validation/CL/WidthConcatenateLayer.cpp b/tests/validation/CL/WidthConcatenateLayer.cpp
index 36a5e6f..6af3c64 100644
--- a/tests/validation/CL/WidthConcatenateLayer.cpp
+++ b/tests/validation/CL/WidthConcatenateLayer.cpp
@@ -44,20 +44,20 @@
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
-              framework::dataset::make("InputInfo1", {  TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching data type input/output
-                                                        TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching y dimension
-                                                        TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching total width
-                                                        TensorInfo(TensorShape(16U, 27U, 5U), 1, DataType::F32, 0)
+              framework::dataset::make("InputInfo1", {  TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching data type input/output
+                                                        TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching y dimension
+                                                        TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching total width
+                                                        TensorInfo(TensorShape(16U, 27U, 5U), 1, DataType::F32)
               }),
-              framework::dataset::make("InputInfo2", {  TensorInfo(TensorShape(24U, 27U, 4U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(52U, 27U, 5U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(52U, 27U, 5U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(16U, 27U, 5U), 1, DataType::F32, 0)
+              framework::dataset::make("InputInfo2", {  TensorInfo(TensorShape(24U, 27U, 4U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(52U, 27U, 5U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(52U, 27U, 5U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(16U, 27U, 5U), 1, DataType::F32)
               })),
-              framework::dataset::make("OutputInfo", {  TensorInfo(TensorShape(47U, 27U, 5U), 1, DataType::F16, 0),
-                                                        TensorInfo(TensorShape(75U, 12U, 5U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(11U, 27U, 5U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(32U, 27U, 5U), 1, DataType::F32, 0)
+              framework::dataset::make("OutputInfo", {  TensorInfo(TensorShape(47U, 27U, 5U), 1, DataType::F16),
+                                                        TensorInfo(TensorShape(75U, 12U, 5U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(11U, 27U, 5U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(32U, 27U, 5U), 1, DataType::F32)
               })),
               framework::dataset::make("Expected", { false, false, false, true })),
              input_info1, input_info2, output_info, expected)
diff --git a/tests/validation/FixedPoint.h b/tests/validation/FixedPoint.h
deleted file mode 100644
index 81c4f53..0000000
--- a/tests/validation/FixedPoint.h
+++ /dev/null
@@ -1,997 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_TEST_VALIDATION_FIXEDPOINT_H__
-#define __ARM_COMPUTE_TEST_VALIDATION_FIXEDPOINT_H__
-
-#include "support/ToolchainSupport.h"
-#include "tests/Utils.h"
-
-#include <cassert>
-#include <cstdint>
-#include <cstdlib>
-#include <limits>
-#include <string>
-#include <type_traits>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace fixed_point_arithmetic
-{
-namespace detail
-{
-// Forward declare structs
-struct functions;
-template <typename T>
-struct constant_expr;
-}
-
-/** Fixed point traits */
-namespace traits
-{
-// Promote types
-// *INDENT-OFF*
-// clang-format off
-/** Promote a type */
-template <typename T> struct promote { };
-/** Promote uint8_t to uint16_t */
-template <> struct promote<uint8_t> { using type = uint16_t; /**< Promoted type */ };
-/** Promote int8_t to int16_t */
-template <> struct promote<int8_t> { using type = int16_t; /**< Promoted type */ };
-/** Promote uint16_t to uint32_t */
-template <> struct promote<uint16_t> { using type = uint32_t; /**< Promoted type */ };
-/** Promote int16_t to int32_t */
-template <> struct promote<int16_t> { using type = int32_t; /**< Promoted type */ };
-/** Promote uint32_t to uint64_t */
-template <> struct promote<uint32_t> { using type = uint64_t; /**< Promoted type */ };
-/** Promote int32_t to int64_t */
-template <> struct promote<int32_t> { using type = int64_t; /**< Promoted type */ };
-/** Promote float to float */
-template <> struct promote<float> { using type = float; /**< Promoted type */ };
-/** Promote half to half */
-template <> struct promote<half> { using type = half; /**< Promoted type */ };
-
-/** Get promoted type */
-template <typename T>
-using promote_t = typename promote<T>::type;
-// clang-format on
-// *INDENT-ON*
-}
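// For reference, the promotion table above pins each storage type to its
// double-width accumulator. A minimal compile-time check (a sketch, assuming
// this header is included and namespace
// arm_compute::test::fixed_point_arithmetic is in scope):

#include <cstdint>
#include <type_traits>

static_assert(std::is_same<traits::promote_t<int8_t>, int16_t>::value,
              "QS8 arithmetic accumulates into int16_t");
static_assert(std::is_same<traits::promote_t<int16_t>, int32_t>::value,
              "QS16 arithmetic accumulates into int32_t");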
-
-/** Strongly typed enum class representing the overflow policy */
-enum class OverflowPolicy
-{
-    WRAP,    /**< Wrap policy */
-    SATURATE /**< Saturate policy */
-};
-/** Strongly typed enum class representing the rounding policy */
-enum class RoundingPolicy
-{
-    TO_ZERO,        /**< Round to zero policy */
-    TO_NEAREST_EVEN /**< Round to nearest even policy */
-};
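// To make the two policies concrete: a hedged sketch of adding two raw 8-bit
// fixed point values under each policy (the helper add_q7 is hypothetical,
// not part of the removed header):

#include <algorithm>
#include <cstdint>

inline int8_t add_q7(int8_t a, int8_t b, OverflowPolicy policy)
{
    const int32_t sum = static_cast<int32_t>(a) + static_cast<int32_t>(b);
    if(policy == OverflowPolicy::SATURATE)
    {
        // Clamp to the representable range instead of overflowing.
        return static_cast<int8_t>(std::min<int32_t>(127, std::max<int32_t>(-128, sum)));
    }
    // WRAP: keep only the low 8 bits and let the value wrap around.
    return static_cast<int8_t>(sum & 0xFF);
}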
-
-/** Arbitrary fixed-point arithmetic class */
-template <typename T>
-class fixed_point
-{
-public:
-    // Static Checks
-    static_assert(std::is_integral<T>::value, "Type is not an integer");
-
-    /** Constructor (from different fixed point type)
-     *
-     * @param[in] val Fixed point
-     * @param[in] p   Fixed point precision
-     */
-    template <typename U>
-    fixed_point(fixed_point<U> val, uint8_t p)
-        : _value(0), _fixed_point_position(p)
-    {
-        assert(p > 0 && p < std::numeric_limits<T>::digits);
-        T v = 0;
-
-        if(std::numeric_limits<T>::digits < std::numeric_limits<U>::digits)
-        {
-            val.rescale(p);
-            v = detail::constant_expr<T>::saturate_cast(val.raw());
-        }
-        else
-        {
-            auto v_cast = static_cast<fixed_point<T>>(val);
-            v_cast.rescale(p);
-            v = v_cast.raw();
-        }
-        _value = static_cast<T>(v);
-    }
-    /** Constructor (from integer)
-     *
-     * @param[in] val    Integer value to be represented as fixed point
-     * @param[in] p      Fixed point precision
-     * @param[in] is_raw If true, val is a raw fixed point value; otherwise an integer
-     */
-    template <typename U, typename = typename std::enable_if<std::is_integral<U>::value>::type>
-    fixed_point(U val, uint8_t p, bool is_raw = false)
-        : _value(val << p), _fixed_point_position(p)
-    {
-        if(is_raw)
-        {
-            _value = val;
-        }
-    }
-    /** Constructor (from float)
-     *
-     * @param[in] val Float value to be represented as fixed point
-     * @param[in] p   Fixed point precision
-     */
-    fixed_point(float val, uint8_t p)
-        : _value(detail::constant_expr<T>::to_fixed(val, p)), _fixed_point_position(p)
-    {
-        assert(p > 0 && p < std::numeric_limits<T>::digits);
-    }
-    /** Constructor (from float string)
-     *
-     * @param[in] str Float string to be represented as fixed point
-     * @param[in] p   Fixed point precision
-     */
-    fixed_point(std::string str, uint8_t p)
-        : _value(detail::constant_expr<T>::to_fixed(support::cpp11::stof(str), p)), _fixed_point_position(p)
-    {
-        assert(p > 0 && p < std::numeric_limits<T>::digits);
-    }
-    /** Default copy assignment operator */
-    fixed_point &operator=(const fixed_point &) = default;
-    /** Default move assignment operator */
-    fixed_point &operator=(fixed_point &&) = default;
-    /** Default copy constructor */
-    fixed_point(const fixed_point &) = default;
-    /** Default move constructor */
-    fixed_point(fixed_point &&) = default;
-
-    /** Float conversion operator
-     *
-     * @return Float representation of fixed point
-     */
-    operator float() const
-    {
-        return detail::constant_expr<T>::to_float(_value, _fixed_point_position);
-    }
-    /** Integer conversion operator
-     *
-     * @return Integer representation of fixed point
-     */
-    template <typename U, typename = typename std::enable_if<std::is_integral<U>::value>::type>
-    operator U() const
-    {
-        return detail::constant_expr<T>::to_int(_value, _fixed_point_position);
-    }
-    /** Convert to a fixed point with a different underlying type but the same precision
-     *
-     * @note Down-conversion might fail.
-     */
-    template <typename U>
-    operator fixed_point<U>()
-    {
-        U val = static_cast<U>(_value);
-        if(std::numeric_limits<U>::digits < std::numeric_limits<T>::digits)
-        {
-            val = detail::constant_expr<U>::saturate_cast(_value);
-        }
-        return fixed_point<U>(val, _fixed_point_position, true);
-    }
-
-    /** Arithmetic += assignment operator
-     *
-     * @param[in] rhs Fixed point operand
-     *
-     * @return Reference to this fixed point
-     */
-    template <typename U>
-    fixed_point<T> &operator+=(const fixed_point<U> &rhs)
-    {
-        fixed_point<T> val(rhs, _fixed_point_position);
-        _value += val.raw();
-        return *this;
-    }
-    /** Arithmetic -= assignment operator
-     *
-     * @param[in] rhs Fixed point operand
-     *
-     * @return Reference to this fixed point
-     */
-    template <typename U>
-    fixed_point<T> &operator-=(const fixed_point<U> &rhs)
-    {
-        fixed_point<T> val(rhs, _fixed_point_position);
-        _value -= val.raw();
-        return *this;
-    }
-
-    /** Raw value accessor
-     *
-     * @return Raw fixed point value
-     */
-    T raw() const
-    {
-        return _value;
-    }
-    /** Precision accessor
-     *
-     * @return Precision of fixed point
-     */
-    uint8_t precision() const
-    {
-        return _fixed_point_position;
-    }
-    /** Rescale a fixed point to a new precision
-     *
-     * @param[in] p New fixed point precision
-     */
-    void rescale(uint8_t p)
-    {
-        assert(p > 0 && p < std::numeric_limits<T>::digits);
-
-        using promoted_T = typename traits::promote<T>::type;
-        promoted_T val   = _value;
-        if(p > _fixed_point_position)
-        {
-            val <<= (p - _fixed_point_position);
-        }
-        else if(p < _fixed_point_position)
-        {
-            uint8_t pbar = _fixed_point_position - p;
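-            // Round to nearest: add half of the discarded fraction before shifting right.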
-            val += (pbar != 0) ? (1 << (pbar - 1)) : 0;
-            val >>= pbar;
-        }
-
-        _value                = detail::constant_expr<T>::saturate_cast(val);
-        _fixed_point_position = p;
-    }
-
-private:
-    T       _value;                /**< Fixed point raw value */
-    uint8_t _fixed_point_position; /**< Fixed point precision */
-};
-
-namespace detail
-{
-/** Count the number of leading zero bits in the given value.
- *
- * @param[in] value Input value.
- *
- * @return Number of leading zero bits.
- */
-template <typename T>
-constexpr int clz(T value)
-{
-    using unsigned_T = typename std::make_unsigned<T>::type;
-    // __builtin_clz operates on unsigned int; correct the reported count to
-    // match the width of the original type.
-    return __builtin_clz(value) - (32 - std::numeric_limits<unsigned_T>::digits);
-}
-
-/** Constant expressions */
-template <typename T>
-struct constant_expr
-{
-    /** Calculate representation of 1 in fixed point given a fixed point precision
-     *
-     * @param[in] p Fixed point precision
-     *
-     * @return Representation of value 1 in fixed point.
-     */
-    static constexpr T fixed_one(uint8_t p)
-    {
-        return (1 << p);
-    }
-    /** Calculate fixed point precision step given a fixed point precision
-     *
-     * @param[in] p Fixed point precision
-     *
-     * @return Fixed point precision step
-     */
-    static constexpr float fixed_step(uint8_t p)
-    {
-        return (1.0f / static_cast<float>(1 << p));
-    }
-
-    /** Convert a fixed point value to float given its precision.
-     *
-     * @param[in] val Fixed point value
-     * @param[in] p   Fixed point precision
-     *
-     * @return Float representation of the fixed point number
-     */
-    static constexpr float to_float(T val, uint8_t p)
-    {
-        return static_cast<float>(val * fixed_step(p));
-    }
-    /** Convert a fixed point value to integer given its precision.
-     *
-     * @param[in] val Fixed point value
-     * @param[in] p   Fixed point precision
-     *
-     * @return Integer part of the fixed point number
-     */
-    static constexpr T to_int(T val, uint8_t p)
-    {
-        return val >> p;
-    }
-    /** Convert a single precision floating point value to a fixed point representation given its precision.
-     *
-     * @param[in] val Floating point value
-     * @param[in] p   Fixed point precision
-     *
-     * @return The raw fixed point representation
-     */
-    static constexpr T to_fixed(float val, uint8_t p)
-    {
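-        // Scale by 2^p, round half away from zero, then saturate to the range of T.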
-        return static_cast<T>(saturate_cast<float>(val * fixed_one(p) + ((val >= 0) ? 0.5 : -0.5)));
-    }
-    /** Clamp a value to the given range
-     *
-     * @param[in] val Value to clamp
-     * @param[in] min Minimum value to clamp to
-     * @param[in] max Maximum value to clamp to
-     *
-     * @return clamped value
-     */
-    static constexpr T clamp(T val, T min, T max)
-    {
-        return std::min(std::max(val, min), max);
-    }
-    /** Saturating cast of a value to the numeric range of T
-     *
-     * @param[in] val Value to saturate
-     *
-     * @return Saturated value
-     */
-    template <typename U>
-    static constexpr T saturate_cast(U val)
-    {
-        return static_cast<T>(std::min<U>(std::max<U>(val, static_cast<U>(std::numeric_limits<T>::min())), static_cast<U>(std::numeric_limits<T>::max())));
-    }
-};
-/** Functions */
-struct functions
-{
-    /** Output stream operator
-     *
-     * @param[in] s Output stream
-     * @param[in] x Fixed point value
-     *
-     * @return Reference to the updated output stream
-     */
-    template <typename T, typename U, typename traits>
-    static std::basic_ostream<T, traits> &write(std::basic_ostream<T, traits> &s, fixed_point<U> &x)
-    {
-        return s << static_cast<float>(x);
-    }
-    /** Signbit of a fixed point number.
-     *
-     * @param[in] x Fixed point number
-     *
-     * @return True if negative, false otherwise.
-     */
-    template <typename T>
-    static bool signbit(fixed_point<T> x)
-    {
-        return ((x.raw() >> std::numeric_limits<T>::digits) != 0);
-    }
-    /** Checks if two fixed point numbers are equal
-     *
-     * @param[in] x First fixed point operand
-     * @param[in] y Second fixed point operand
-     *
-     * @return True if the fixed points are equal, false otherwise
-     */
-    template <typename T>
-    static bool isequal(fixed_point<T> x, fixed_point<T> y)
-    {
-        uint8_t p = std::min(x.precision(), y.precision());
-        x.rescale(p);
-        y.rescale(p);
-        return (x.raw() == y.raw());
-    }
-    /** Checks if two fixed point numbers are not equal
-     *
-     * @param[in] x First fixed point operand
-     * @param[in] y Second fixed point operand
-     *
-     * @return True if the fixed points are not equal, false otherwise
-     */
-    template <typename T>
-    static bool isnotequal(fixed_point<T> x, fixed_point<T> y)
-    {
-        return !isequal(x, y);
-    }
-    /** Checks if one fixed point is greater than the other
-     *
-     * @param[in] x First fixed point operand
-     * @param[in] y Second fixed point operand
-     *
-     * @return True if x is greater than y, false otherwise
-     */
-    template <typename T>
-    static bool isgreater(fixed_point<T> x, fixed_point<T> y)
-    {
-        uint8_t p = std::min(x.precision(), y.precision());
-        x.rescale(p);
-        y.rescale(p);
-        return (x.raw() > y.raw());
-    }
-    /** Checks if one fixed point is greater than or equal to the other
-     *
-     * @param[in] x First fixed point operand
-     * @param[in] y Second fixed point operand
-     *
-     * @return True if x is greater than or equal to y, false otherwise
-     */
-    template <typename T>
-    static bool isgreaterequal(fixed_point<T> x, fixed_point<T> y)
-    {
-        uint8_t p = std::min(x.precision(), y.precision());
-        x.rescale(p);
-        y.rescale(p);
-        return (x.raw() >= y.raw());
-    }
-    /** Checks if one fixed point is less than the other
-     *
-     * @param[in] x First fixed point operand
-     * @param[in] y Second fixed point operand
-     *
-     * @return True if x is less than y, false otherwise
-     */
-    template <typename T>
-    static bool isless(fixed_point<T> x, fixed_point<T> y)
-    {
-        uint8_t p = std::min(x.precision(), y.precision());
-        x.rescale(p);
-        y.rescale(p);
-        return (x.raw() < y.raw());
-    }
-    /** Checks if one fixed point is less than or equal to the other
-     *
-     * @param[in] x First fixed point operand
-     * @param[in] y Second fixed point operand
-     *
-     * @return True if x is less than or equal to y, false otherwise
-     */
-    template <typename T>
-    static bool islessequal(fixed_point<T> x, fixed_point<T> y)
-    {
-        uint8_t p = std::min(x.precision(), y.precision());
-        x.rescale(p);
-        y.rescale(p);
-        return (x.raw() <= y.raw());
-    }
-    /** Checks if one fixed point is less than or greater than the other (i.e. not equal)
-     *
-     * @param[in] x First fixed point operand
-     * @param[in] y Second fixed point operand
-     *
-     * @return True if x is less than or greater than y, false otherwise
-     */
-    template <typename T>
-    static bool islessgreater(fixed_point<T> x, fixed_point<T> y)
-    {
-        return isnotequal(x, y);
-    }
-    /** Clamp fixed point to specific range.
-     *
-     * @param[in] x   Fixed point operand
-     * @param[in] min Minimum value to clamp to
-     * @param[in] max Maximum value to clamp to
-     *
-     * @return Clamped result
-     */
-    template <typename T>
-    static fixed_point<T> clamp(fixed_point<T> x, T min, T max)
-    {
-        return fixed_point<T>(constant_expr<T>::clamp(x.raw(), min, max), x.precision(), true);
-    }
-    /** Negate number
-     *
-     * @param[in] x Fixed point operand
-     *
-     * @return Negated fixed point result
-     */
-    template <OverflowPolicy OP = OverflowPolicy::SATURATE, typename T>
-    static fixed_point<T> negate(fixed_point<T> x)
-    {
-        using promoted_T = typename traits::promote<T>::type;
-        promoted_T val   = -x.raw();
-        if(OP == OverflowPolicy::SATURATE)
-        {
-            val = constant_expr<T>::saturate_cast(val);
-        }
-        return fixed_point<T>(static_cast<T>(val), x.precision(), true);
-    }
-    /** Perform addition of two fixed point numbers
-     *
-     * @param[in] x First fixed point operand
-     * @param[in] y Second fixed point operand
-     *
-     * @return Result fixed point with precision equal to minimum precision of both operands
-     */
-    template <OverflowPolicy OP = OverflowPolicy::SATURATE, typename T>
-    static fixed_point<T> add(fixed_point<T> x, fixed_point<T> y)
-    {
-        uint8_t p = std::min(x.precision(), y.precision());
-        x.rescale(p);
-        y.rescale(p);
-        if(OP == OverflowPolicy::SATURATE)
-        {
-            using type = typename traits::promote<T>::type;
-            type val   = static_cast<type>(x.raw()) + static_cast<type>(y.raw());
-            val        = constant_expr<T>::saturate_cast(val);
-            return fixed_point<T>(static_cast<T>(val), p, true);
-        }
-        else
-        {
-            return fixed_point<T>(x.raw() + y.raw(), p, true);
-        }
-    }
-    /** Perform subtraction of two fixed point numbers
-     *
-     * @param[in] x First fixed point operand
-     * @param[in] y Second fixed point operand
-     *
-     * @return Result fixed point with precision equal to minimum precision of both operands
-     */
-    template <OverflowPolicy OP = OverflowPolicy::SATURATE, typename T>
-    static fixed_point<T> sub(fixed_point<T> x, fixed_point<T> y)
-    {
-        uint8_t p = std::min(x.precision(), y.precision());
-        x.rescale(p);
-        y.rescale(p);
-        if(OP == OverflowPolicy::SATURATE)
-        {
-            using type = typename traits::promote<T>::type;
-            type val   = static_cast<type>(x.raw()) - static_cast<type>(y.raw());
-            val        = constant_expr<T>::saturate_cast(val);
-            return fixed_point<T>(static_cast<T>(val), p, true);
-        }
-        else
-        {
-            return fixed_point<T>(x.raw() - y.raw(), p, true);
-        }
-    }
-    /** Perform multiplication of two fixed point numbers
-     *
-     * @param[in] x First fixed point operand
-     * @param[in] y Second fixed point operand
-     *
-     * @return Result fixed point with precision equal to minimum precision of both operands
-     */
-    template <OverflowPolicy OP = OverflowPolicy::SATURATE, typename T>
-    static fixed_point<T> mul(fixed_point<T> x, fixed_point<T> y)
-    {
-        using promoted_T        = typename traits::promote<T>::type;
-        uint8_t    p_min        = std::min(x.precision(), y.precision());
-        uint8_t    p_max        = std::max(x.precision(), y.precision());
-        promoted_T round_factor = (1 << (p_max - 1));
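-        // The raw product carries p_min + p_max fractional bits; rounding and shifting right by p_max leaves p_min, the result precision.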
-        promoted_T val          = ((static_cast<promoted_T>(x.raw()) * static_cast<promoted_T>(y.raw())) + round_factor) >> p_max;
-        if(OP == OverflowPolicy::SATURATE)
-        {
-            val = constant_expr<T>::saturate_cast(val);
-        }
-        return fixed_point<T>(static_cast<T>(val), p_min, true);
-    }
-    /** Perform division of two fixed point numbers
-     *
-     * @param[in] x First fixed point operand
-     * @param[in] y Second fixed point operand
-     *
-     * @return Result fixed point with precision equal to minimum precision of both operands
-     */
-    template <OverflowPolicy OP = OverflowPolicy::SATURATE, typename T>
-    static fixed_point<T> div(fixed_point<T> x, fixed_point<T> y)
-    {
-        using promoted_T = typename traits::promote<T>::type;
-        uint8_t    p     = std::min(x.precision(), y.precision());
-        promoted_T denom = static_cast<promoted_T>(y.raw());
-        if(denom != 0)
-        {
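-            // Pre-shift the numerator by the larger precision so the quotient keeps fractional bits after the integer division.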
-            promoted_T val = (static_cast<promoted_T>(x.raw()) << std::max(x.precision(), y.precision())) / denom;
-            if(OP == OverflowPolicy::SATURATE)
-            {
-                val = constant_expr<T>::saturate_cast(val);
-            }
-            return fixed_point<T>(static_cast<T>(val), p, true);
-        }
-        else
-        {
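-            // Division by zero saturates to the extreme representable value matching the numerator's sign.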
-            T val = (x.raw() < 0) ? std::numeric_limits<T>::min() : std::numeric_limits<T>::max();
-            return fixed_point<T>(val, p, true);
-        }
-    }
-    /** Shift left
-     *
-     * @param[in] x     Fixed point operand
-     * @param[in] shift Shift value
-     *
-     * @return Shifted value
-     */
-    template <OverflowPolicy OP = OverflowPolicy::SATURATE, typename T>
-    static fixed_point<T> shift_left(fixed_point<T> x, size_t shift)
-    {
-        using promoted_T = typename traits::promote<T>::type;
-        promoted_T val   = static_cast<promoted_T>(x.raw()) << shift;
-        if(OP == OverflowPolicy::SATURATE)
-        {
-            val = constant_expr<T>::saturate_cast(val);
-        }
-        return fixed_point<T>(static_cast<T>(val), x.precision(), true);
-    }
-    /** Shift right
-     *
-     * @param[in] x     Fixed point operand
-     * @param[in] shift Shift value
-     *
-     * @return Shifted value
-     */
-    template <typename T>
-    static fixed_point<T> shift_right(fixed_point<T> x, size_t shift)
-    {
-        return fixed_point<T>(x.raw() >> shift, x.precision(), true);
-    }
-    /** Calculate absolute value
-     *
-     * @param[in] x Fixed point operand
-     *
-     * @return Absolute value of operand
-     */
-    template <typename T>
-    static fixed_point<T> abs(fixed_point<T> x)
-    {
-        using promoted_T = typename traits::promote<T>::type;
-        T val            = (x.raw() < 0) ? constant_expr<T>::saturate_cast(-static_cast<promoted_T>(x.raw())) : x.raw();
-        return fixed_point<T>(val, x.precision(), true);
-    }
-    /** Calculate the natural logarithm of a fixed point number
-     *
-     * @param[in] x Fixed point operand
-     *
-     * @return Natural logarithm of the operand
-     */
-    template <typename T>
-    static fixed_point<T> log(fixed_point<T> x)
-    {
-        uint8_t p         = x.precision();
-        auto    const_one = fixed_point<T>(static_cast<T>(1), p);
-
-        // Logarithm of 1 is zero and logarithm of negative values is not defined in R, so return 0.
-        // Also, log(x) == -log(1/x) for 0 < x < 1.
-        if(isequal(x, const_one) || islessequal(x, fixed_point<T>(static_cast<T>(0), p)))
-        {
-            return fixed_point<T>(static_cast<T>(0), p, true);
-        }
-        else if(isless(x, const_one))
-        {
-            return mul(log(div(const_one, x)), fixed_point<T>(-1, p));
-        }
-
-        // Normalize x into [1, 2) by removing the integer power of two; it is added back below as shift_val * ln(2)
-        T shift_val = 31 - __builtin_clz(x.raw() >> p);
-        x           = shift_right(x, shift_val);
-        x           = sub(x, const_one);
-
-        // Constants
-        auto ln2 = fixed_point<T>(0.6931471, p);
-        auto A   = fixed_point<T>(1.4384189, p);
-        auto B   = fixed_point<T>(-0.67719, p);
-        auto C   = fixed_point<T>(0.3218538, p);
-        auto D   = fixed_point<T>(-0.0832229, p);
-
-        // Polynomial expansion
-        auto sum = add(mul(x, D), C);
-        sum      = add(mul(x, sum), B);
-        sum      = add(mul(x, sum), A);
-        sum      = mul(x, sum);
-
-        return mul(add(sum, fixed_point<T>(static_cast<T>(shift_val), p)), ln2);
-    }
-    /** Calculate the exponential of a fixed point number.
-     *
-     * exp(x) = exp(k * ln(2)) * exp(x - k * ln(2)), with k = floor(x / ln(2))
-     *        = pow(2, k) * exp(x - k * ln(2))
-     *        = exp(x - k * ln(2)) << k
-     *
-     * @param[in] x Fixed point operand
-     *
-     * @return Exponential value of operand
-     */
-    template <typename T>
-    static fixed_point<T> exp(fixed_point<T> x)
-    {
-        uint8_t p = x.precision();
-        // Constants
-        auto const_one = fixed_point<T>(1, p);
-        auto ln2       = fixed_point<T>(0.6931471, p);
-        auto inv_ln2   = fixed_point<T>(1.442695, p);
-        auto A         = fixed_point<T>(0.9978546, p);
-        auto B         = fixed_point<T>(0.4994721, p);
-        auto C         = fixed_point<T>(0.1763723, p);
-        auto D         = fixed_point<T>(0.0435108, p);
-
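-        // scaled_int_part = floor(x / ln(2)): the power-of-two shift applied at the end.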
-        T scaled_int_part = detail::constant_expr<T>::to_int(mul(x, inv_ln2).raw(), p);
-
-        // Polynomial expansion
-        auto frac_part = sub(x, mul(ln2, fixed_point<T>(scaled_int_part, p)));
-        auto taylor    = add(mul(frac_part, D), C);
-        taylor         = add(mul(frac_part, taylor), B);
-        taylor         = add(mul(frac_part, taylor), A);
-        taylor         = mul(frac_part, taylor);
-        taylor         = add(taylor, const_one);
-
-        // Saturate if the left shift by scaled_int_part would overflow (not enough leading zero bits)
-        if(static_cast<T>(clz(taylor.raw())) <= scaled_int_part)
-        {
-            return fixed_point<T>(std::numeric_limits<T>::max(), p, true);
-        }
-
-        return (scaled_int_part < 0) ? shift_right(taylor, -scaled_int_part) : shift_left(taylor, scaled_int_part);
-    }
-    /** Calculate the inverse square root of a fixed point number
-     *
-     * @param[in] x Fixed point operand
-     *
-     * @return Inverse square root value of operand
-     */
-    template <typename T>
-    static fixed_point<T> inv_sqrt(fixed_point<T> x)
-    {
-        const uint8_t p     = x.precision();
-        int8_t        shift = std::numeric_limits<T>::digits - (p + detail::clz(x.raw()));
-
-        shift += std::numeric_limits<T>::is_signed ? 1 : 0;
-
-        // Use volatile to restrict compiler optimizations on shift, as the compiler reports a maybe-uninitialized error on Android
-        volatile int8_t *shift_ptr = &shift;
-
-        auto           const_three = fixed_point<T>(3, p);
-        auto           a           = (*shift_ptr < 0) ? shift_left(x, -(shift)) : shift_right(x, shift);
-        fixed_point<T> x2          = a;
-
-        // We need three iterations to find the result for QS8 and five for QS16
-        constexpr int num_iterations = std::is_same<T, int8_t>::value ? 3 : 5;
-        for(int i = 0; i < num_iterations; ++i)
-        {
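-            // Newton-Raphson step for 1 / sqrt(a): x2 <- x2 * (3 - a * x2^2) / 2.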
-            fixed_point<T> three_minus_dx = sub(const_three, mul(a, mul(x2, x2)));
-            x2                            = shift_right(mul(x2, three_minus_dx), 1);
-        }
-
-        return (shift < 0) ? shift_left(x2, (-shift) >> 1) : shift_right(x2, shift >> 1);
-    }
-    /** Calculate the hyperbolic tangent of a fixed point number
-     *
-     * @param[in] x Fixed point operand
-     *
-     * @return Hyperbolic tangent of the operand
-     */
-    template <typename T>
-    static fixed_point<T> tanh(fixed_point<T> x)
-    {
-        uint8_t p = x.precision();
-        // Constants
-        auto const_one = fixed_point<T>(1, p);
-        auto const_two = fixed_point<T>(2, p);
-
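-        // tanh(x) = (exp(2x) - 1) / (exp(2x) + 1)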
-        auto exp2x = exp(const_two * x);
-        auto num   = exp2x - const_one;
-        auto den   = exp2x + const_one;
-        auto tanh  = num / den;
-
-        return tanh;
-    }
-    /** Calculate the a-th power of a fixed point number.
-     *
-     *  The power is computed as x^a = e^(log(x) * a)
-     *
-     * @param[in] x Fixed point operand
-     * @param[in] a Fixed point exponent
-     *
-     * @return a-th power of the operand
-     */
-    template <typename T>
-    static fixed_point<T> pow(fixed_point<T> x, fixed_point<T> a)
-    {
-        return exp(log(x) * a);
-    }
-};
-
-template <typename T>
-bool operator==(const fixed_point<T> &lhs, const fixed_point<T> &rhs)
-{
-    return functions::isequal(lhs, rhs);
-}
-template <typename T>
-bool operator!=(const fixed_point<T> &lhs, const fixed_point<T> &rhs)
-{
-    return !operator==(lhs, rhs);
-}
-template <typename T>
-bool operator<(const fixed_point<T> &lhs, const fixed_point<T> &rhs)
-{
-    return functions::isless(lhs, rhs);
-}
-template <typename T>
-bool operator>(const fixed_point<T> &lhs, const fixed_point<T> &rhs)
-{
-    return operator<(rhs, lhs);
-}
-template <typename T>
-bool operator<=(const fixed_point<T> &lhs, const fixed_point<T> &rhs)
-{
-    return !operator>(lhs, rhs);
-}
-template <typename T>
-bool operator>=(const fixed_point<T> &lhs, const fixed_point<T> &rhs)
-{
-    return !operator<(lhs, rhs);
-}
-template <typename T>
-fixed_point<T> operator+(const fixed_point<T> &lhs, const fixed_point<T> &rhs)
-{
-    return functions::add(lhs, rhs);
-}
-template <typename T>
-fixed_point<T> operator-(const fixed_point<T> &lhs, const fixed_point<T> &rhs)
-{
-    return functions::sub(lhs, rhs);
-}
-template <typename T>
-fixed_point<T> operator-(const fixed_point<T> &rhs)
-{
-    return functions::negate(rhs);
-}
-template <typename T>
-fixed_point<T> operator*(fixed_point<T> x, fixed_point<T> y)
-{
-    return functions::mul(x, y);
-}
-template <typename T>
-fixed_point<T> operator/(fixed_point<T> x, fixed_point<T> y)
-{
-    return functions::div(x, y);
-}
-template <typename T>
-fixed_point<T> operator>>(fixed_point<T> x, size_t shift)
-{
-    return functions::shift_right(x, shift);
-}
-template <typename T>
-fixed_point<T> operator<<(fixed_point<T> x, size_t shift)
-{
-    return functions::shift_left(x, shift);
-}
-template <typename T, typename U, typename traits>
-std::basic_ostream<T, traits> &operator<<(std::basic_ostream<T, traits> &s, fixed_point<U> x)
-{
-    return functions::write(s, x);
-}
-template <typename T>
-inline fixed_point<T> min(fixed_point<T> x, fixed_point<T> y)
-{
-    return x > y ? y : x;
-}
-template <typename T>
-inline fixed_point<T> max(fixed_point<T> x, fixed_point<T> y)
-{
-    return x > y ? x : y;
-}
-template <OverflowPolicy OP = OverflowPolicy::SATURATE, typename T>
-inline fixed_point<T> add(fixed_point<T> x, fixed_point<T> y)
-{
-    return functions::add<OP>(x, y);
-}
-template <OverflowPolicy OP = OverflowPolicy::SATURATE, typename T>
-inline fixed_point<T> sub(fixed_point<T> x, fixed_point<T> y)
-{
-    return functions::sub<OP>(x, y);
-}
-template <OverflowPolicy OP = OverflowPolicy::SATURATE, typename T>
-inline fixed_point<T> mul(fixed_point<T> x, fixed_point<T> y)
-{
-    return functions::mul<OP>(x, y);
-}
-template <typename T>
-inline fixed_point<T> div(fixed_point<T> x, fixed_point<T> y)
-{
-    return functions::div(x, y);
-}
-template <typename T>
-inline fixed_point<T> abs(fixed_point<T> x)
-{
-    return functions::abs(x);
-}
-template <typename T>
-inline fixed_point<T> clamp(fixed_point<T> x, T min, T max)
-{
-    return functions::clamp(x, min, max);
-}
-template <typename T>
-inline fixed_point<T> exp(fixed_point<T> x)
-{
-    return functions::exp(x);
-}
-template <typename T>
-inline fixed_point<T> log(fixed_point<T> x)
-{
-    return functions::log(x);
-}
-template <typename T>
-inline fixed_point<T> inv_sqrt(fixed_point<T> x)
-{
-    return functions::inv_sqrt(x);
-}
-template <typename T>
-inline fixed_point<T> tanh(fixed_point<T> x)
-{
-    return functions::tanh(x);
-}
-template <typename T>
-inline fixed_point<T> pow(fixed_point<T> x, fixed_point<T> a)
-{
-    return functions::pow(x, a);
-}
-} // namespace detail
-
-// Expose operators
-using detail::operator==;
-using detail::operator!=;
-using detail::operator<;
-using detail::operator>;
-using detail::operator<=;
-using detail::operator>=;
-using detail::operator+;
-using detail::operator-;
-using detail::operator*;
-using detail::operator/;
-using detail::operator>>;
-using detail::operator<<;
-
-// Expose additional functions
-using detail::min;
-using detail::max;
-using detail::add;
-using detail::sub;
-using detail::mul;
-using detail::div;
-using detail::abs;
-using detail::clamp;
-using detail::exp;
-using detail::log;
-using detail::inv_sqrt;
-using detail::tanh;
-using detail::pow;
-// TODO: floor
-// TODO: ceil
-// TODO: sqrt
-} // namespace fixed_point_arithmetic
-} // namespace test
-} // namespace arm_compute
-#endif /*__ARM_COMPUTE_TEST_VALIDATION_FIXEDPOINT_H__ */
diff --git a/tests/validation/GLES_COMPUTE/ActivationLayer.cpp b/tests/validation/GLES_COMPUTE/ActivationLayer.cpp
index a8c7253..7676b85 100644
--- a/tests/validation/GLES_COMPUTE/ActivationLayer.cpp
+++ b/tests/validation/GLES_COMPUTE/ActivationLayer.cpp
@@ -61,35 +61,14 @@
         case ActivationLayerInfo::ActivationFunction::SQUARE:
             return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.1f : epsilon);
         case ActivationLayerInfo::ActivationFunction::LOGISTIC:
-            if(is_data_type_fixed_point(data_type))
-            {
-                return AbsoluteTolerance<float>(5.f);
-            }
-            else
-            {
-                return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.001f : epsilon);
-            }
+            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.001f : epsilon);
         case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
             return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.00001f : epsilon);
         case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
         case ActivationLayerInfo::ActivationFunction::SQRT:
-            if(is_data_type_fixed_point(data_type))
-            {
-                return AbsoluteTolerance<float>(5.f);
-            }
-            else
-            {
-                return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.01f : 0.00001f);
-            }
+            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.01f : 0.00001f);
         case ActivationLayerInfo::ActivationFunction::TANH:
-            if(is_data_type_fixed_point(data_type))
-            {
-                return AbsoluteTolerance<float>(5.f);
-            }
-            else
-            {
-                return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.001f : 0.00001f);
-            }
+            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.001f : 0.00001f);
         default:
             return AbsoluteTolerance<float>(epsilon);
     }
diff --git a/tests/validation/GLES_COMPUTE/PoolingLayer.cpp b/tests/validation/GLES_COMPUTE/PoolingLayer.cpp
index ac1bd72..7679007 100644
--- a/tests/validation/GLES_COMPUTE/PoolingLayer.cpp
+++ b/tests/validation/GLES_COMPUTE/PoolingLayer.cpp
@@ -59,17 +59,17 @@
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                                                   framework::dataset::make("InputInfo",
 {
-    TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type
-    TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink
-    TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination
-    TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination
-    TensorInfo(TensorShape(15U, 13U, 5U), 1, DataType::F32, 0), // Non-rectangular Global Pooling
-    TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32, 0), // Invalid output Global Pooling
-    TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32, 0),
+    TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type
+    TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Window shrink
+    TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid pad/size combination
+    TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid pad/size combination
+    TensorInfo(TensorShape(15U, 13U, 5U), 1, DataType::F32), // Non-rectangular Global Pooling
+    TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32), // Invalid output Global Pooling
+    TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32),
 }),
 framework::dataset::make("OutputInfo",
 {
-    TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16, 0), TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(25U, 16U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32, 0), TensorInfo(TensorShape(2U, 2U, 5U), 1, DataType::F32, 0), TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32, 0),
+    TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16), TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32), TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32), TensorInfo(TensorShape(25U, 16U, 2U), 1, DataType::F32), TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32), TensorInfo(TensorShape(2U, 2U, 5U), 1, DataType::F32), TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32),
 })),
 framework::dataset::make("PoolInfo",
 {
diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
index 2b4d277..814d1f5 100644
--- a/tests/validation/Helpers.h
+++ b/tests/validation/Helpers.h
@@ -177,12 +177,10 @@
 
 /** Helper function to get the testing range for batch normalization layer.
  *
- * @param[in] fixed_point_position (Optional) Number of bits for the fractional part. Defaults to 0.
- *
 * @return A pair containing the lower and upper testing bounds.
  */
 template <typename T>
-std::pair<T, T> get_batchnormalization_layer_test_bounds(int fixed_point_position = 0)
+std::pair<T, T> get_batchnormalization_layer_test_bounds()
 {
     const bool is_float = std::is_floating_point<T>::value;
     std::pair<T, T> bounds;
@@ -194,7 +192,7 @@
     }
     else
     {
-        bounds = std::make_pair(1, 1 << (fixed_point_position));
+        bounds = std::make_pair(1, 1);
     }
 
     return bounds;
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index 591d142..1d82ff0 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -75,20 +75,20 @@
 
 TEST_SUITE(ConvolutionLayer)
 DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
-                                                                                           framework::dataset::make("InputInfo", { TensorInfo(TensorShape(18U, 18U, 32U), 1, DataType::F32, 0),
-                                                                                                                    TensorInfo(TensorShape(23U, 27U, 32U, 4U), 1, DataType::F32, 0),
-                                                                                                                    TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32, 0),
-                                                                                                                    TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32, 0)
+                                                                                           framework::dataset::make("InputInfo", { TensorInfo(TensorShape(18U, 18U, 32U), 1, DataType::F32),
+                                                                                                                    TensorInfo(TensorShape(23U, 27U, 32U, 4U), 1, DataType::F32),
+                                                                                                                    TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32),
+                                                                                                                    TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32)
                                                                                                                                  }),
-                                                                                           framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 32U, 21U), 1, DataType::F32, 0),
-                                                                                                                    TensorInfo(TensorShape(5U, 5U, 32U, 21U), 1, DataType::F32, 0),
-                                                                                                                    TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0),
-                                                                                                                    TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16, 0)
+                                                                                           framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 32U, 21U), 1, DataType::F32),
+                                                                                                                    TensorInfo(TensorShape(5U, 5U, 32U, 21U), 1, DataType::F32),
+                                                                                                                    TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
+                                                                                                                    TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16)
                                                                                                                                    })),
-                                                                                       framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(16U, 16U, 21U), 1, DataType::F32, 0),
-                                                                                                                TensorInfo(TensorShape(19U, 23U, 21U, 4U), 1, DataType::F32, 0),
-                                                                                                                TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0),
-                                                                                                                TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32, 0)
+                                                                                       framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(16U, 16U, 21U), 1, DataType::F32),
+                                                                                                                TensorInfo(TensorShape(19U, 23U, 21U, 4U), 1, DataType::F32),
+                                                                                                                TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
+                                                                                                                TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32)
                                                                                                                               })),
                                                                                    framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0),
                                                                                                             PadStrideInfo(1, 1, 0, 0),
diff --git a/tests/validation/NEON/DeconvolutionLayer.cpp b/tests/validation/NEON/DeconvolutionLayer.cpp
index 87d413f..277953b 100644
--- a/tests/validation/NEON/DeconvolutionLayer.cpp
+++ b/tests/validation/NEON/DeconvolutionLayer.cpp
@@ -100,33 +100,33 @@
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
-    framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),   // Mismatching data type
-                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),   // Invalid weights shape
-                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16, 4),   // Non supported data type
-                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 11),  // Invalid bias shape
-                                            TensorInfo(TensorShape(13U, 11U, 4U, 3U), 1, DataType::F32, 0), // Window shrink
-                                            TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32, 0),
+    framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),   // Mismatching data type
+                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),   // Invalid weights shape
+                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),   // Non supported data type
+                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),  // Invalid bias shape
+                                            TensorInfo(TensorShape(13U, 11U, 4U, 3U), 1, DataType::F32), // Window shrink
+                                            TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32),
                                           }),
-    framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16, 0),
-                                            TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0),
-                                            TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16, 5),
-                                            TensorInfo(TensorShape(3U, 2U, 2U, 2U), 1, DataType::F32, 11),
-                                            TensorInfo(TensorShape(3U, 3U, 4U), 1, DataType::F32, 0),
-                                              TensorInfo(TensorShape(1U, 1U, 2U, 4U), 1, DataType::F32, 0),
+    framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16),
+                                            TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16),
+                                            TensorInfo(TensorShape(3U, 2U, 2U, 2U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(3U, 3U, 4U), 1, DataType::F32),
+                                              TensorInfo(TensorShape(1U, 1U, 2U, 4U), 1, DataType::F32),
                                           })),
-    framework::dataset::make("BiasInfo",  { TensorInfo(TensorShape(1U), 1, DataType::F16, 0),
-                                            TensorInfo(TensorShape(1U), 1, DataType::F32, 0),
-                                            TensorInfo(TensorShape(1U), 1, DataType::F32, 5),
-                                            TensorInfo(TensorShape(25U, 11U), 1, DataType::F32, 11),
-                                            TensorInfo(TensorShape(1U), 1, DataType::F32, 0),
-                                            TensorInfo(TensorShape(4U), 1, DataType::F32, 0),
+    framework::dataset::make("BiasInfo",  { TensorInfo(TensorShape(1U), 1, DataType::F16),
+                                            TensorInfo(TensorShape(1U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(1U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(25U, 11U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(1U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(4U), 1, DataType::F32),
                                           })),
-    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16, 0),
-                                            TensorInfo(TensorShape(25U, 10U, 2U), 1, DataType::F32, 0),
-                                            TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 5),
-                                            TensorInfo(TensorShape(13U, 13U, 2U), 1, DataType::F32, 0),
-                                            TensorInfo(TensorShape(11U, 9U, 1U, 3U), 1, DataType::F32, 0),
-                                            TensorInfo(TensorShape(32U, 16U, 4U), 1, DataType::F32, 0),
+    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16),
+                                            TensorInfo(TensorShape(25U, 10U, 2U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(13U, 13U, 2U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(11U, 9U, 1U, 3U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(32U, 16U, 4U), 1, DataType::F32),
                                           })),
     framework::dataset::make("PadStrideInfo", { PadStrideInfo(1, 1, 0, 0),
                                                 PadStrideInfo(1, 1, 0, 0),
diff --git a/tests/validation/NEON/DilatedConvolutionLayer.cpp b/tests/validation/NEON/DilatedConvolutionLayer.cpp
index 7cfffc0..25b357e 100644
--- a/tests/validation/NEON/DilatedConvolutionLayer.cpp
+++ b/tests/validation/NEON/DilatedConvolutionLayer.cpp
@@ -64,20 +64,20 @@
 
 TEST_SUITE(DilatedConvolutionLayer)
 DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
-                                                                                           framework::dataset::make("InputInfo", { TensorInfo(TensorShape(8U, 8U, 2U), 1, DataType::F32, 0),
-                                                                                                                    TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32, 0),
-                                                                                                                    TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32, 0),
-                                                                                                                    TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32, 0)
+                                                                                           framework::dataset::make("InputInfo", { TensorInfo(TensorShape(8U, 8U, 2U), 1, DataType::F32),
+                                                                                                                    TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32),
+                                                                                                                    TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32),
+                                                                                                                    TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32)
                                                                                                                                  }),
-                                                                                           framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0),
-                                                                                                                    TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0),
-                                                                                                                    TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0),
-                                                                                                                    TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16, 0)
+                                                                                           framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
+                                                                                                                    TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
+                                                                                                                    TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
+                                                                                                                    TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16)
                                                                                                                                    })),
-                                                                                       framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(6U, 6U, 1U), 1, DataType::F32, 0),
-                                                                                                                TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32, 0),
-                                                                                                                TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0),
-                                                                                                                TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32, 0)
+                                                                                       framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(6U, 6U, 1U), 1, DataType::F32),
+                                                                                                                TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32),
+                                                                                                                TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
+                                                                                                                TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32)
                                                                                                                               })),
                                                                                    framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0),
                                                                                                             PadStrideInfo(1, 1, 0, 0),
diff --git a/tests/validation/NEON/DirectConvolutionLayer.cpp b/tests/validation/NEON/DirectConvolutionLayer.cpp
index bf5b33c..acd0e5d 100644
--- a/tests/validation/NEON/DirectConvolutionLayer.cpp
+++ b/tests/validation/NEON/DirectConvolutionLayer.cpp
@@ -80,45 +80,45 @@
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
-        framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type input/weights
-                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching input feature maps
-                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Unsupported kernel width
-                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Non-rectangular weights dimensions
-                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid weights dimensions
-                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid stride
-                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid biases size
-                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid biases dimensions
-                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid output size
+        framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type input/weights
+                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching input feature maps
+                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Unsupported kernel width
+                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Non-rectangular weights dimensions
+                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid weights dimensions
+                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid stride
+                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid biases size
+                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid biases dimensions
+                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid output size
                                               }),
-        framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F16, 0),
-                                                 TensorInfo(TensorShape(3U, 3U, 3U, 4U), 1, DataType::F32, 0),
-                                                 TensorInfo(TensorShape(9U, 9U, 2U, 4U), 1, DataType::F32, 0),
-                                                 TensorInfo(TensorShape(5U, 3U, 2U, 4U), 1, DataType::F32, 0),
-                                                 TensorInfo(TensorShape(3U, 3U, 2U, 4U, 3U), 1, DataType::F32, 0),
-                                                 TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0),
-                                                 TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0),
-                                                 TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0),
-                                                 TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0),
+        framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F16),
+                                                 TensorInfo(TensorShape(3U, 3U, 3U, 4U), 1, DataType::F32),
+                                                 TensorInfo(TensorShape(9U, 9U, 2U, 4U), 1, DataType::F32),
+                                                 TensorInfo(TensorShape(5U, 3U, 2U, 4U), 1, DataType::F32),
+                                                 TensorInfo(TensorShape(3U, 3U, 2U, 4U, 3U), 1, DataType::F32),
+                                                 TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
+                                                 TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
+                                                 TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
+                                                 TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
                                               })),
-        framework::dataset::make("BiasesInfo",{ TensorInfo(TensorShape(4U), 1, DataType::F32, 0),
-                                                TensorInfo(TensorShape(4U), 1, DataType::F32, 0),
-                                                TensorInfo(TensorShape(4U), 1, DataType::F32, 0),
-                                                TensorInfo(TensorShape(4U), 1, DataType::F32, 0),
-                                                TensorInfo(TensorShape(4U), 1, DataType::F32, 0),
-                                                TensorInfo(TensorShape(4U), 1, DataType::F32, 0),
-                                                TensorInfo(TensorShape(3U), 1, DataType::F32, 0),
-                                                TensorInfo(TensorShape(4U, 2U), 1, DataType::F32, 0),
-                                                TensorInfo(TensorShape(4U), 1, DataType::F32, 0),
+        framework::dataset::make("BiasesInfo",{ TensorInfo(TensorShape(4U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(4U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(4U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(4U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(4U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(4U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(3U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(4U, 2U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(4U), 1, DataType::F32),
                                               })),
-        framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0),
-                                                TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0),
-                                                TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0),
-                                                TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0),
-                                                TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0),
-                                                TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0),
-                                                TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0),
-                                                TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0),
-                                                TensorInfo(TensorShape(26U, 11U, 4U), 1, DataType::F32, 0),
+        framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(26U, 11U, 4U), 1, DataType::F32),
                                               })),
         framework::dataset::make("ConvInfo",  { PadStrideInfo(1, 1, 0, 0),
                                                 PadStrideInfo(1, 1, 0, 0),
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index eb350e1..9eba3c8 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -102,7 +102,7 @@
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
     framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Input not a multiple of 4
-                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::S32, 2),                                 // Mismatching data type
+                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::S32),                                    // Mismatching data type
                                              TensorInfo(TensorShape(20U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Invalid dimensions
                                              TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Invalid dimensions
                                              TensorInfo(TensorShape(16U, 32U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)),
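
The QASYMM8 entries above pair the data type with a QuantizationInfo(scale, offset); under the asymmetric scheme a stored byte q represents scale * (q - offset). A tiny self-contained check of that mapping (the concrete values are chosen here purely for illustration):

```cpp
#include <cstdint>
#include <cstdio>

// QuantizationInfo(1.f/255, 10) maps a quantized value q back to
// real = scale * (q - offset); the representable range is then
// roughly [-0.039, 0.961].
int main()
{
    const float   scale  = 1.f / 255;
    const int32_t offset = 10;
    const uint8_t q      = 138;
    std::printf("dequantized: %f\n", scale * (static_cast<int32_t>(q) - offset)); // ~0.502
    return 0;
}
```
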
diff --git a/tests/validation/NEON/LocallyConnected.cpp b/tests/validation/NEON/LocallyConnected.cpp
index 0c36ff6..bd0999d 100644
--- a/tests/validation/NEON/LocallyConnected.cpp
+++ b/tests/validation/NEON/LocallyConnected.cpp
@@ -51,41 +51,41 @@
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
-    framework::dataset::make("InputInfo",  { TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching data type input/weights
-                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching data type input/bias
-                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching data type input/output
-                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching shape input/weights
-                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching shape input/bias
-                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching shape input/output
-                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Asymmetric padding
-                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0)
+    framework::dataset::make("InputInfo",  { TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching data type input/weights
+                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching data type input/bias
+                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching data type input/output
+                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching shape input/weights
+                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching shape input/bias
+                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching shape input/output
+                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Asymmetric padding
+                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32)
                                            }),
-    framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F16, 0),
-                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 274U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32, 0)
+    framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F16),
+                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 274U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32)
                                            })),
-    framework::dataset::make("BiasInfo",   { TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F16, 0),
-                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(21U, 274U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0)
+    framework::dataset::make("BiasInfo",   { TensorInfo(TensorShape(21U, 275U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F16),
+                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(21U, 274U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(21U, 275U), 1, DataType::F32)
                                            })),
-    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F16, 0),
-                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(11U, 25U, 22U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0),
-                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0)
+    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F16),
+                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(11U, 25U, 22U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32)
                                            })),
     framework::dataset::make("PadStride",  { PadStrideInfo(2, 1, 0, 0),
                                              PadStrideInfo(2, 1, 0, 0),
diff --git a/tests/validation/NEON/NormalizationLayer.cpp b/tests/validation/NEON/NormalizationLayer.cpp
index 02cca0b..a432100 100644
--- a/tests/validation/NEON/NormalizationLayer.cpp
+++ b/tests/validation/NEON/NormalizationLayer.cpp
@@ -66,19 +66,19 @@
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
-    framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type input/output
-                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching shapes
-                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Even normalization
-                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Non implemented IN_MAP_2D
-                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink
-                                            TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 0),
+    framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type input/output
+                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
+                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Even normalization
+                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Non implemented IN_MAP_2D
+                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Window shrink
+                                            TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                           }),
-    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16, 0),
-                                            TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32, 0),
-                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),
-                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),
-                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),
-                                            TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 0),
+    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
+                                            TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                           })),
     framework::dataset::make("NormInfo",  { NormalizationLayerInfo(NormType::IN_MAP_1D, 5),
                                             NormalizationLayerInfo(NormType::IN_MAP_1D, 5),
diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp
index bbfca46..336c066 100644
--- a/tests/validation/NEON/PoolingLayer.cpp
+++ b/tests/validation/NEON/PoolingLayer.cpp
@@ -71,21 +71,21 @@
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
-    framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),     // Mismatching data type
-                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),     // Window shrink
-                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),     // Invalid pad/size combination
-                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),     // Invalid pad/size combination
-                                            TensorInfo(TensorShape(15U, 13U, 5U), 1, DataType::F32, 0),     // Non-rectangular Global Pooling
-                                            TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32, 0),     // Invalid output Global Pooling
-                                            TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32, 0),
+    framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Mismatching data type
+                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Window shrink
+                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Invalid pad/size combination
+                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Invalid pad/size combination
+                                            TensorInfo(TensorShape(15U, 13U, 5U), 1, DataType::F32),     // Non-rectangular Global Pooling
+                                            TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32),     // Invalid output Global Pooling
+                                            TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32),
                                           }),
-    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16, 0),
-                                            TensorInfo(TensorShape(25U, 10U, 2U), 1, DataType::F32, 0),
-                                            TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32, 0),
-                                            TensorInfo(TensorShape(25U, 16U, 2U), 1, DataType::F32, 0),
-                                            TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32, 0),
-                                            TensorInfo(TensorShape(2U, 2U, 5U), 1, DataType::F32, 0),
-                                            TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0),
+    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16),
+                                            TensorInfo(TensorShape(25U, 10U, 2U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(25U, 16U, 2U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(2U, 2U, 5U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32),
                                           })),
     framework::dataset::make("PoolInfo",  { PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)),
                                             PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)),
diff --git a/tests/validation/NEON/RNNLayer.cpp b/tests/validation/NEON/RNNLayer.cpp
index 7aa3bef..a5f8499 100644
--- a/tests/validation/NEON/RNNLayer.cpp
+++ b/tests/validation/NEON/RNNLayer.cpp
@@ -49,59 +49,59 @@
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
-               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U), 1, DataType::U8, 0),      // Wrong data type
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Wrong input size
-                                                       TensorInfo(TensorShape(27U, 13U), 1, DataType::F32, 0),     // Wrong weights size
-                                                       TensorInfo(TensorShape(27U, 13U), 1, DataType::F32, 0),     // Wrong recurrent weights size
-                                                       TensorInfo(TensorShape(27U, 13U), 1, DataType::F32, 0),     // Wrong bias size
-                                                       TensorInfo(TensorShape(27U, 13U), 1, DataType::F32, 0),     // Wrong output size
-                                                       TensorInfo(TensorShape(27U, 13U), 1, DataType::F32, 0),     // Wrong hidden output size
-                                                       TensorInfo(TensorShape(32U, 32U), 1, DataType::F32, 0),
+               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U), 1, DataType::U8),      // Wrong data type
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Wrong input size
+                                                       TensorInfo(TensorShape(27U, 13U), 1, DataType::F32),     // Wrong weights size
+                                                       TensorInfo(TensorShape(27U, 13U), 1, DataType::F32),     // Wrong recurrent weights size
+                                                       TensorInfo(TensorShape(27U, 13U), 1, DataType::F32),     // Wrong bias size
+                                                       TensorInfo(TensorShape(27U, 13U), 1, DataType::F32),     // Wrong output size
+                                                       TensorInfo(TensorShape(27U, 13U), 1, DataType::F32),     // Wrong hidden output size
+                                                       TensorInfo(TensorShape(32U, 32U), 1, DataType::F32),
                }),
-               framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0),
-                                                       TensorInfo(TensorShape(32U, 32U), 1, DataType::F32, 0),
+               framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(27U, 11U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 11U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 11U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 11U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 11U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 11U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 32U), 1, DataType::F32),
                })),
-               framework::dataset::make("RecurrentWeightsInfo", { TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0),
-                                                                  TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0),
-                                                                  TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0),
-                                                                  TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0),
-                                                                  TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0),
-                                                                  TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0),
-                                                                  TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0),
-                                                                  TensorInfo(TensorShape(32U, 32U), 1, DataType::F32, 0),
+               framework::dataset::make("RecurrentWeightsInfo", { TensorInfo(TensorShape(11U, 11U), 1, DataType::F32),
+                                                                  TensorInfo(TensorShape(11U, 11U), 1, DataType::F32),
+                                                                  TensorInfo(TensorShape(11U, 11U), 1, DataType::F32),
+                                                                  TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32),
+                                                                  TensorInfo(TensorShape(11U, 11U), 1, DataType::F32),
+                                                                  TensorInfo(TensorShape(11U, 11U), 1, DataType::F32),
+                                                                  TensorInfo(TensorShape(11U, 11U), 1, DataType::F32),
+                                                                  TensorInfo(TensorShape(32U, 32U), 1, DataType::F32),
                })),
-               framework::dataset::make("BiasInfo", { TensorInfo(TensorShape(11U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(11U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(11U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(11U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(30U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(11U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(11U), 1, DataType::F32, 0),
-                                                      TensorInfo(TensorShape(32U), 1, DataType::F32, 0),
+               framework::dataset::make("BiasInfo", { TensorInfo(TensorShape(11U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(11U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(11U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(11U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(30U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(11U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(11U), 1, DataType::F32),
+                                                      TensorInfo(TensorShape(32U), 1, DataType::F32),
                })),
-               framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(11U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(32U, 32U), 1, DataType::F32, 0),
+               framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(11U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(32U, 32U), 1, DataType::F32),
                })),
-               framework::dataset::make("HiddenStateInfo", { TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                             TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                             TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                             TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                             TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                             TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0),
-                                                             TensorInfo(TensorShape(11U, 13U, 2U), 1, DataType::F32, 0),
-                                                             TensorInfo(TensorShape(32U, 32U), 1, DataType::F32, 0),
+               framework::dataset::make("HiddenStateInfo", { TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                             TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                             TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                             TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                             TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                             TensorInfo(TensorShape(11U, 13U), 1, DataType::F32),
+                                                             TensorInfo(TensorShape(11U, 13U, 2U), 1, DataType::F32),
+                                                             TensorInfo(TensorShape(32U, 32U), 1, DataType::F32),
                })),
                framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                                                             ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
diff --git a/tests/validation/NEON/Scale.cpp b/tests/validation/NEON/Scale.cpp
index 5f5cfdd..0d4a86e 100644
--- a/tests/validation/NEON/Scale.cpp
+++ b/tests/validation/NEON/Scale.cpp
@@ -77,17 +77,17 @@
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
-        framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8, 0),  // Mismatching data type
-                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Unsupported sampling point
-                                                TensorInfo(TensorShape(4U, 27U, 13U), 1, DataType::F32, 0), // Invalid policy
-                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Insufficient padding
-                                                TensorInfo(TensorShape(4U, 27U, 13U), 1, DataType::F32, 0),
+        framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),  // Mismatching data type
+                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Unsupported sampling point
+                                                TensorInfo(TensorShape(4U, 27U, 13U), 1, DataType::F32), // Invalid policy
+                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Insufficient padding
+                                                TensorInfo(TensorShape(4U, 27U, 13U), 1, DataType::F32),
                                               }),
-        framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(132U, 25U, 2U), 1, DataType::F32, 0),
-                                                TensorInfo(TensorShape(132U, 25U, 2U), 1, DataType::F32, 0),
-                                                TensorInfo(TensorShape(4U, 132U, 25U), 1, DataType::F32, 0),
-                                                TensorInfo(TensorShape(132U, 25U, 2U), 1, DataType::F32, 0),
-                                                TensorInfo(TensorShape(4U, 132U, 25U), 1, DataType::F32, 0),
+        framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(132U, 25U, 2U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(132U, 25U, 2U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(4U, 132U, 25U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(132U, 25U, 2U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(4U, 132U, 25U), 1, DataType::F32),
                                               })),
         framework::dataset::make("InterpolationPolicy", { InterpolationPolicy::NEAREST_NEIGHBOR,
                                                           InterpolationPolicy::NEAREST_NEIGHBOR,
diff --git a/tests/validation/Validation.h b/tests/validation/Validation.h
index 0c96052..9ce597b 100644
--- a/tests/validation/Validation.h
+++ b/tests/validation/Validation.h
@@ -24,7 +24,6 @@
 #ifndef __ARM_COMPUTE_TEST_VALIDATION_H__
 #define __ARM_COMPUTE_TEST_VALIDATION_H__
 
-#include "arm_compute/core/FixedPoint.h"
 #include "arm_compute/core/IArray.h"
 #include "arm_compute/core/Types.h"
 #include "support/ToolchainSupport.h"
diff --git a/tests/validation/fixtures/PoolingLayerFixture.h b/tests/validation/fixtures/PoolingLayerFixture.h
index 2453954..499628c 100644
--- a/tests/validation/fixtures/PoolingLayerFixture.h
+++ b/tests/validation/fixtures/PoolingLayerFixture.h
@@ -65,16 +65,10 @@
             std::uniform_real_distribution<> distribution(-1.f, 1.f);
             library->fill(tensor, distribution, 0);
         }
-        else if(is_data_type_quantized_asymmetric(tensor.data_type()))
+        else // Data type is quantized asymmetric
         {
             library->fill_tensor_uniform(tensor, 0);
         }
-        else
-        {
-            const int                       one_fixed = 1;
-            std::uniform_int_distribution<> distribution(-one_fixed, one_fixed);
-            library->fill(tensor, distribution, 0);
-        }
     }
 
     TensorType compute_target(TensorShape shape, PoolingLayerInfo info,
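
With the fixed-point branch deleted, the fixture fills reduce to a two-way dispatch: real-valued uniforms for floating-point tensors, raw uniform integers for quantized-asymmetric ones. A condensed standalone sketch of that pattern, with a hypothetical `fill_tensor` standing in for the framework's `library->fill` machinery:

```cpp
#include <cstdint>
#include <random>
#include <vector>

// Stand-in for the fixture fill: floating-point tensors get real values
// in [-1, 1]; quantized-asymmetric tensors get raw uniform bytes.
void fill_tensor(std::vector<float> &float_data, std::vector<uint8_t> &quant_data,
                 bool is_float, unsigned seed)
{
    std::mt19937 gen(seed);
    if(is_float)
    {
        std::uniform_real_distribution<float> dist(-1.f, 1.f);
        for(auto &v : float_data)
        {
            v = dist(gen);
        }
    }
    else // Data type is quantized asymmetric
    {
        std::uniform_int_distribution<int> dist(0, 255);
        for(auto &q : quant_data)
        {
            q = static_cast<uint8_t>(dist(gen));
        }
    }
}
```
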
diff --git a/tests/validation/fixtures/SoftmaxLayerFixture.h b/tests/validation/fixtures/SoftmaxLayerFixture.h
index 59ce519..99c0710 100644
--- a/tests/validation/fixtures/SoftmaxLayerFixture.h
+++ b/tests/validation/fixtures/SoftmaxLayerFixture.h
@@ -64,17 +64,11 @@
             std::uniform_real_distribution<> distribution(-1000.f, 1000.f);
             library->fill(tensor, distribution, 0);
         }
-        else if(is_data_type_quantized_asymmetric(tensor.data_type()))
+        else // Data type is quantized asymmetric
         {
             std::uniform_int_distribution<> distribution(0, 100);
             library->fill(tensor, distribution, 0);
         }
-        else
-        {
-            const int                       one_fixed = 1;
-            std::uniform_int_distribution<> distribution(-one_fixed, one_fixed);
-            library->fill(tensor, distribution, 0);
-        }
     }
 
     TensorType compute_target(const TensorShape &shape, DataType data_type,
@@ -139,20 +133,6 @@
 };
 
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class SoftmaxValidationFixedPointFixture : public SoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
-{
-public:
-    template <typename...>
-    void setup(TensorShape shape, DataType data_type)
-    {
-        SoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape,
-                                                                                          data_type,
-                                                                                          QuantizationInfo(),
-                                                                                          1.0f);
-    }
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
 class SoftmaxValidationQuantizedFixture : public SoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
 {
 public:
diff --git a/tests/validation/reference/ArithmeticSubtraction.cpp b/tests/validation/reference/ArithmeticSubtraction.cpp
index bed2d37..f39d01f 100644
--- a/tests/validation/reference/ArithmeticSubtraction.cpp
+++ b/tests/validation/reference/ArithmeticSubtraction.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,7 +23,6 @@
  */
 #include "ArithmeticSubtraction.h"
 
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 
 namespace arm_compute
diff --git a/tests/validation/reference/BatchNormalizationLayer.cpp b/tests/validation/reference/BatchNormalizationLayer.cpp
index 3d1a6ed..4ea3769 100644
--- a/tests/validation/reference/BatchNormalizationLayer.cpp
+++ b/tests/validation/reference/BatchNormalizationLayer.cpp
@@ -25,7 +25,6 @@
 
 #include "ActivationLayer.h"
 
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 
 namespace arm_compute
diff --git a/tests/validation/reference/ChannelCombine.cpp b/tests/validation/reference/ChannelCombine.cpp
index c1ec3ec..b76dcac 100644
--- a/tests/validation/reference/ChannelCombine.cpp
+++ b/tests/validation/reference/ChannelCombine.cpp
@@ -24,7 +24,6 @@
 #include "ChannelCombine.h"
 
 #include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 
 namespace arm_compute
diff --git a/tests/validation/reference/ChannelExtract.cpp b/tests/validation/reference/ChannelExtract.cpp
index 595bb13..6f17fc0 100644
--- a/tests/validation/reference/ChannelExtract.cpp
+++ b/tests/validation/reference/ChannelExtract.cpp
@@ -24,7 +24,6 @@
 #include "ChannelExtract.h"
 
 #include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 
 namespace arm_compute
diff --git a/tests/validation/reference/ColorConvert.cpp b/tests/validation/reference/ColorConvert.cpp
index a8a5304..6aa2ffa 100644
--- a/tests/validation/reference/ColorConvert.cpp
+++ b/tests/validation/reference/ColorConvert.cpp
@@ -24,7 +24,6 @@
 #include "ColorConvert.h"
 
 #include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 #include "tests/validation/reference/ColorConvertHelper.h"
 
diff --git a/tests/validation/reference/Convolution3d.h b/tests/validation/reference/Convolution3d.h
index 7001758..2e5fefd 100644
--- a/tests/validation/reference/Convolution3d.h
+++ b/tests/validation/reference/Convolution3d.h
@@ -25,7 +25,6 @@
 #define __ARM_COMPUTE_TEST_VALIDATION_CONVOLUTION_H__
 
 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 #include "tests/validation/reference/UtilsQuantizedAsymm.h"
 
@@ -91,74 +90,16 @@
     *out_ptr = acc + (*b_ptr);
 }
 
-// 3D convolution for fixed point type
-template < typename T, typename TB, typename std::enable_if < std::is_integral<T>::value &&std::is_integral<TB>::value, int >::type = 0 >
+// 3D convolution for QASYMM8 type
+template < typename T, typename TB, typename std::enable_if < std::is_same<T, uint8_t>::value &&std::is_same<TB, int32_t>::value, int >::type = 0 >
 inline void convolution3d(const SimpleTensor<T> &in, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &out,
                           int i_offset, int w_offset, int b_offset, int o_offset,
                           int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights, int dilation_x = 1, int dilation_y = 1)
 {
-    const T *in_ptr               = in.data() + i_offset;
-    const T *w_ptr                = weights.data() + w_offset;
-    const T *b_ptr                = bias.data() + b_offset;
-    T       *out_ptr              = out.data() + o_offset;
-    int      fixed_point_position = in.fixed_point_position();
-
-    const int half_width_weights_start  = width_weights / 2;
-    const int half_width_weights_end    = ((width_weights % 2) == 0) ? (half_width_weights_start - 1) : half_width_weights_start;
-    const int half_height_weights_start = height_weights / 2;
-    const int half_height_weights_end   = ((height_weights % 2) == 0) ? (half_height_weights_start - 1) : half_height_weights_start;
-
-    using namespace fixed_point_arithmetic;
-    using promoted_type = fixed_point_arithmetic::traits::promote_t<T>;
-
-    // Reset accumulator
-    fixed_point<promoted_type> acc(0, fixed_point_position);
-
-    // Compute a 2D convolution for each IFM and accumulate the result
-    for(int ifm = 0; ifm < depth_in; ++ifm)
-    {
-        // Compute the offset for the input slice
-        const int offset_slice_in = xi + yi * width_in + ifm * width_in * height_in;
-
-        // Compute 2D convolution
-        for(int yk = -half_height_weights_start; yk <= half_height_weights_end; ++yk)
-        {
-            for(int xk = -half_width_weights_start; xk <= half_width_weights_end; ++xk)
-            {
-                // Check if the pixel is out-of-bound
-                if(is_valid_pixel(xi + xk * dilation_x, 0, width_in) && is_valid_pixel(yi + yk * dilation_y, 0, height_in))
-                {
-                    const int idx = xk + half_width_weights_start;
-                    const int idy = yk + half_height_weights_start;
-
-                    const fixed_point<promoted_type> i_value(in_ptr[offset_slice_in + xk * dilation_x + yk * dilation_y * width_in], fixed_point_position, true);
-                    const fixed_point<promoted_type> w_value(w_ptr[idx + idy * width_weights + ifm * width_weights * height_weights], fixed_point_position, true);
-                    const fixed_point<promoted_type> iw = i_value * w_value;
-                    acc                                 = iw + acc;
-                }
-            }
-        }
-    }
-
-    // Get the bias
-    const fixed_point<promoted_type> b(*b_ptr, fixed_point_position, true);
-
-    // Accumulate the bias and covert back
-    acc = acc + b;
-    fixed_point<T> res(acc);
-    *out_ptr = res.raw();
-}
-
-// 3D convolution for QASYMM8 type
-template <>
-inline void convolution3d(const SimpleTensor<uint8_t> &in, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, SimpleTensor<uint8_t> &out,
-                          int i_offset, int w_offset, int b_offset, int o_offset,
-                          int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights, int dilation_x, int dilation_y)
-{
-    const uint8_t *in_ptr  = in.data() + i_offset;
-    const uint8_t *w_ptr   = weights.data() + w_offset;
-    const int32_t *b_ptr   = bias.data() + b_offset;
-    uint8_t       *out_ptr = out.data() + o_offset;
+    const T  *in_ptr  = in.data() + i_offset;
+    const T  *w_ptr   = weights.data() + w_offset;
+    const TB *b_ptr   = bias.data() + b_offset;
+    T        *out_ptr = out.data() + o_offset;
 
     const int   input_offset   = -in.quantization_info().offset;
     const float input_scale    = in.quantization_info().scale;
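
The surviving QASYMM8 path works on dequantized values: each byte is offset-corrected and scaled back to a real number, the multiply-accumulate runs in float, and the result is quantized to the output's parameters. A minimal per-element sketch of that scheme; `QuantParams` and `conv_element` are illustrative names, and the bias is assumed to carry the conventional in_scale * w_scale scale:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative per-tensor asymmetric quantization parameters.
struct QuantParams
{
    float   scale;
    int32_t offset;
};

// One output element: dequantize with (q - offset) * scale, accumulate in
// float, add the int32 bias, then quantize back to uint8 with clamping.
uint8_t conv_element(const std::vector<uint8_t> &in, const std::vector<uint8_t> &weights,
                     int32_t bias, QuantParams in_q, QuantParams w_q, QuantParams out_q)
{
    float acc = 0.f;
    for(std::size_t i = 0; i < in.size(); ++i)
    {
        const float in_val = (static_cast<int32_t>(in[i]) - in_q.offset) * in_q.scale;
        const float w_val  = (static_cast<int32_t>(weights[i]) - w_q.offset) * w_q.scale;
        acc += in_val * w_val;
    }
    acc += bias * (in_q.scale * w_q.scale); // Assumed bias scale convention

    int32_t q = static_cast<int32_t>(acc / out_q.scale) + out_q.offset;
    q         = q < 0 ? 0 : (q > 255 ? 255 : q);
    return static_cast<uint8_t>(q);
}
```
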
diff --git a/tests/validation/reference/ConvolutionLayer.cpp b/tests/validation/reference/ConvolutionLayer.cpp
index 00c839d..e212e27 100644
--- a/tests/validation/reference/ConvolutionLayer.cpp
+++ b/tests/validation/reference/ConvolutionLayer.cpp
@@ -23,7 +23,6 @@
  */
 #include "ConvolutionLayer.h"
 
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 #include "tests/validation/reference/Convolution3d.h"
 #include "tests/validation/reference/Permute.h"
diff --git a/tests/validation/reference/DeconvolutionLayer.cpp b/tests/validation/reference/DeconvolutionLayer.cpp
index d073bbf..e73023e 100644
--- a/tests/validation/reference/DeconvolutionLayer.cpp
+++ b/tests/validation/reference/DeconvolutionLayer.cpp
@@ -23,7 +23,6 @@
  */
 #include "ConvolutionLayer.h"
 
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 
 namespace arm_compute
diff --git a/tests/validation/reference/DepthConcatenateLayer.cpp b/tests/validation/reference/DepthConcatenateLayer.cpp
index c9a2352..dbcd575 100644
--- a/tests/validation/reference/DepthConcatenateLayer.cpp
+++ b/tests/validation/reference/DepthConcatenateLayer.cpp
@@ -23,7 +23,6 @@
  */
 #include "DepthConcatenateLayer.h"
 
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 
 namespace arm_compute
diff --git a/tests/validation/reference/DepthConvertLayer.cpp b/tests/validation/reference/DepthConvertLayer.cpp
index 0220077..6f90963 100644
--- a/tests/validation/reference/DepthConvertLayer.cpp
+++ b/tests/validation/reference/DepthConvertLayer.cpp
@@ -23,7 +23,6 @@
  */
 #include "DepthConvertLayer.h"
 
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 
 #include "tests/Types.h"
@@ -61,33 +60,6 @@
     return result;
 }
 
-template < typename T1, typename T2, typename std::enable_if < std::is_integral<T1>::value &&std::is_integral<T2>::value &&std::is_same<T1, T2>::value, int >::type >
-SimpleTensor<T2> depth_convert(const SimpleTensor<T1> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift)
-{
-    ARM_COMPUTE_UNUSED(policy);
-
-    using namespace fixed_point_arithmetic;
-
-    SimpleTensor<T2> result(src.shape(), dt_out);
-
-    bool is_in_place = (&src == &result);
-
-    const int fixed_point_position_in  = src.fixed_point_position();
-    const int fixed_point_position_out = (is_in_place) ? static_cast<int>(shift) : result.fixed_point_position();
-
-    if(!is_in_place || (fixed_point_position_in != fixed_point_position_out))
-    {
-        for(int i = 0; i < src.num_elements(); ++i)
-        {
-            auto x = fixed_point<T2>(src[i], fixed_point_position_in, true);
-            x.resacle(fixed_point_position_out);
-            result[i] = x.raw();
-        }
-    }
-
-    return result;
-}
-
 template SimpleTensor<uint16_t> depth_convert(const SimpleTensor<uint8_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
 template SimpleTensor<int16_t> depth_convert(const SimpleTensor<uint8_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
 template SimpleTensor<int32_t> depth_convert(const SimpleTensor<uint8_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
diff --git a/tests/validation/reference/DepthwiseConvolutionLayer.cpp b/tests/validation/reference/DepthwiseConvolutionLayer.cpp
index d8f3cba..39429e2 100644
--- a/tests/validation/reference/DepthwiseConvolutionLayer.cpp
+++ b/tests/validation/reference/DepthwiseConvolutionLayer.cpp
@@ -26,7 +26,6 @@
 #include "ConvolutionLayer.h"
 #include "Utils.h"
 
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 #include "tests/validation/reference/Utils.h"
 #include "tests/validation/reference/UtilsQuantizedAsymm.h"
diff --git a/tests/validation/reference/FixedPoint.cpp b/tests/validation/reference/FixedPoint.cpp
deleted file mode 100644
index a016093..0000000
--- a/tests/validation/reference/FixedPoint.cpp
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "FixedPoint.h"
-
-#include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
-#include "tests/validation/Helpers.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-namespace reference
-{
-template <typename T>
-SimpleTensor<T> fixed_point_operation(const SimpleTensor<T> &src, FixedPointOp op)
-{
-    SimpleTensor<T> result(src.shape(), src.data_type());
-
-    const int p = src.fixed_point_position();
-    switch(op)
-    {
-        case FixedPointOp::EXP:
-            for(int i = 0; i < src.num_elements(); ++i)
-            {
-                result[i] = fixed_point_arithmetic::exp(fixed_point_arithmetic::fixed_point<T>(src[i], p, true)).raw();
-            }
-            break;
-        case FixedPointOp::LOG:
-            for(int i = 0; i < src.num_elements(); ++i)
-            {
-                result[i] = fixed_point_arithmetic::log(fixed_point_arithmetic::fixed_point<T>(src[i], p, true)).raw();
-            }
-            break;
-        case FixedPointOp::INV_SQRT:
-            for(int i = 0; i < src.num_elements(); ++i)
-            {
-                result[i] = fixed_point_arithmetic::inv_sqrt(fixed_point_arithmetic::fixed_point<T>(src[i], p, true)).raw();
-            }
-            break;
-        case FixedPointOp::RECIPROCAL:
-            for(int i = 0; i < src.num_elements(); ++i)
-            {
-                result[i] = fixed_point_arithmetic::div(fixed_point_arithmetic::fixed_point<T>(1, p), fixed_point_arithmetic::fixed_point<T>(src[i], p, true)).raw();
-            }
-            break;
-        default:
-            ARM_COMPUTE_ERROR("Fixed point operation not supported");
-            break;
-    }
-
-    return result;
-}
-
-template SimpleTensor<int8_t> fixed_point_operation(const SimpleTensor<int8_t> &src, FixedPointOp op);
-template SimpleTensor<int16_t> fixed_point_operation(const SimpleTensor<int16_t> &src, FixedPointOp op);
-} // namespace reference
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
diff --git a/tests/validation/reference/FixedPoint.h b/tests/validation/reference/FixedPoint.h
deleted file mode 100644
index f0117f9..0000000
--- a/tests/validation/reference/FixedPoint.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_TEST_FIXED_POINT_OPERATION_H__
-#define __ARM_COMPUTE_TEST_FIXED_POINT_OPERATION_H__
-
-#include "tests/SimpleTensor.h"
-#include "tests/Types.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-namespace reference
-{
-template <typename T>
-SimpleTensor<T> fixed_point_operation(const SimpleTensor<T> &src, FixedPointOp op);
-} // namespace reference
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_TEST_FIXED_POINT_OPERATION_H__ */
diff --git a/tests/validation/reference/FlattenLayer.cpp b/tests/validation/reference/FlattenLayer.cpp
index e140d75..381ce37 100644
--- a/tests/validation/reference/FlattenLayer.cpp
+++ b/tests/validation/reference/FlattenLayer.cpp
@@ -23,8 +23,6 @@
  */
 #include "FlattenLayer.h"
 
-#include "tests/validation/FixedPoint.h"
-
 namespace arm_compute
 {
 namespace test
diff --git a/tests/validation/reference/FullyConnectedLayer.cpp b/tests/validation/reference/FullyConnectedLayer.cpp
index 3ef10ea..d65d0ca 100644
--- a/tests/validation/reference/FullyConnectedLayer.cpp
+++ b/tests/validation/reference/FullyConnectedLayer.cpp
@@ -24,7 +24,6 @@
 #include "FullyConnectedLayer.h"
 
 #include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/reference/UtilsQuantizedAsymm.h"
 
 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
diff --git a/tests/validation/reference/GEMM.cpp b/tests/validation/reference/GEMM.cpp
index 7378ada..2feab89 100644
--- a/tests/validation/reference/GEMM.cpp
+++ b/tests/validation/reference/GEMM.cpp
@@ -24,7 +24,6 @@
 #include "GEMM.h"
 
 #include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
 
 namespace arm_compute
 {
@@ -85,75 +84,6 @@
     return dst;
 }
 
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
-SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
-{
-    using namespace fixed_point_arithmetic;
-
-    // Create reference
-    SimpleTensor<T> dst{ c.shape(), c.data_type(), 1 };
-
-    // Compute reference
-    using promoted_type = fixed_point_arithmetic::traits::promote_t<T>;
-
-    const int M = dst.shape().y();
-    const int N = dst.shape().x();
-    const int K = a.shape().x();
-    const int D = a.shape().z(); // Number of matrices in a batch
-    const int W = a.shape()[3];  // Number of batched-gemm (Winograd case)
-
-    const int a_stride_z = K * M;
-    const int a_stride_w = K * M * D;
-
-    const int b_stride_z = b.shape().num_dimensions() > 2 ? N * K : 0;     // Do not slide the matrix B along the 3th dimension in case matrix B has less than 3 dimensions
-    const int b_stride_w = b.shape().num_dimensions() > 3 ? K * N * D : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has less than 4 dimensions
-
-    const int c_stride_z = N * M;
-    const int c_stride_w = N * M * D;
-
-    const int            fixed_point_position = a.fixed_point_position();
-    const fixed_point<T> alpha_q(alpha, fixed_point_position);
-    const fixed_point<T> beta_q(beta, fixed_point_position);
-
-    for(int w = 0; w < W; ++w)
-    {
-        for(int depth = 0; depth < D; ++depth)
-        {
-            const int base_addr_a = depth * a_stride_z + w * a_stride_w;
-            const int base_addr_b = depth * b_stride_z + w * b_stride_w;
-            const int base_addr_c = depth * c_stride_z + w * c_stride_w;
-
-            for(int row = 0; row < M; ++row)
-            {
-                for(int col = 0; col < N; ++col)
-                {
-                    fixed_point<promoted_type> acc_q(0, fixed_point_position);
-
-                    for(int k = 0; k < K; ++k)
-                    {
-                        const fixed_point<promoted_type> a0_q(a[base_addr_a + row * K + k], fixed_point_position, true);
-                        const fixed_point<promoted_type> b0_q(b[base_addr_b + k * N + col], fixed_point_position, true);
-
-                        acc_q = acc_q + (a0_q * b0_q);
-                    }
-
-                    // Finalize the result: alpha * A * B + beta * C
-                    const fixed_point<T> c0_q(c[base_addr_c + col + row * N], fixed_point_position, true);
-
-                    fixed_point<T> res_q(acc_q);
-                    res_q = alpha_q * res_q;
-                    res_q = res_q + (beta_q * c0_q);
-
-                    // Store the result
-                    dst[base_addr_c + col + row * N] = res_q.raw();
-                }
-            }
-        }
-    }
-
-    return dst;
-}
-
 template SimpleTensor<float> gemm(const SimpleTensor<float> &a, const SimpleTensor<float> &b, const SimpleTensor<float> &c, float alpha, float beta);
 template SimpleTensor<half> gemm(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
 } // namespace reference
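
Only the floating-point reference GEMM remains. Its core is the textbook triple loop computing dst = alpha * A * B + beta * C; a minimal 2-D sketch under that reading, using plain vectors instead of SimpleTensor and omitting the batch dimensions D and W that the removed code iterated over:

```cpp
#include <cstddef>
#include <vector>

// dst = alpha * (A * B) + beta * C, with A of size M x K, B of size K x N,
// C and dst of size M x N, all stored row-major.
std::vector<float> gemm_ref(const std::vector<float> &a, const std::vector<float> &b,
                            const std::vector<float> &c, int M, int N, int K,
                            float alpha, float beta)
{
    std::vector<float> dst(static_cast<std::size_t>(M) * N);
    for(int row = 0; row < M; ++row)
    {
        for(int col = 0; col < N; ++col)
        {
            float acc = 0.f;
            for(int k = 0; k < K; ++k)
            {
                acc += a[row * K + k] * b[k * N + col];
            }
            dst[col + row * N] = alpha * acc + beta * c[col + row * N];
        }
    }
    return dst;
}
```
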
diff --git a/tests/validation/reference/GEMM.h b/tests/validation/reference/GEMM.h
index cda792b..39007c6 100644
--- a/tests/validation/reference/GEMM.h
+++ b/tests/validation/reference/GEMM.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -38,8 +38,6 @@
 template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0>
 SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta);
 
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
-SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta);
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/GEMMInterleave4x4.h b/tests/validation/reference/GEMMInterleave4x4.h
index e6b09af..e3d72d9 100644
--- a/tests/validation/reference/GEMMInterleave4x4.h
+++ b/tests/validation/reference/GEMMInterleave4x4.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,7 +24,6 @@
 #include "GEMM.h"
 
 #include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
 
 namespace arm_compute
 {
diff --git a/tests/validation/reference/GEMMInterleaveBlocked.h b/tests/validation/reference/GEMMInterleaveBlocked.h
index ff5a0d6..d649a51 100644
--- a/tests/validation/reference/GEMMInterleaveBlocked.h
+++ b/tests/validation/reference/GEMMInterleaveBlocked.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,7 +24,6 @@
 #include "GEMM.h"
 
 #include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
 
 namespace arm_compute
 {
diff --git a/tests/validation/reference/GEMMTranspose1xW.h b/tests/validation/reference/GEMMTranspose1xW.h
index d6a2e89..6ec70b1 100644
--- a/tests/validation/reference/GEMMTranspose1xW.h
+++ b/tests/validation/reference/GEMMTranspose1xW.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2018 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,7 +24,6 @@
 #include "GEMM.h"
 
 #include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
 
 namespace arm_compute
 {
diff --git a/tests/validation/reference/NormalizationLayer.cpp b/tests/validation/reference/NormalizationLayer.cpp
index 85872c8..2ae68c6 100644
--- a/tests/validation/reference/NormalizationLayer.cpp
+++ b/tests/validation/reference/NormalizationLayer.cpp
@@ -24,7 +24,6 @@
 #include "NormalizationLayer.h"
 
 #include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
 
 namespace arm_compute
 {
@@ -146,125 +145,6 @@
     return dst;
 }
 
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
-SimpleTensor<T> normalization_layer(const SimpleTensor<T> &src, NormalizationLayerInfo info)
-{
-    using namespace fixed_point_arithmetic;
-
-    // Create reference
-    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
-
-    // Compute reference
-    const int fixed_point_position = src.fixed_point_position();
-
-    const uint32_t norm_size = info.norm_size();
-    NormType       type      = info.type();
-    fixed_point<T> beta(info.beta(), fixed_point_position);
-    fixed_point<T> kappa(info.kappa(), fixed_point_position);
-
-    const int cols       = src.shape()[0];
-    const int rows       = src.shape()[1];
-    const int depth      = src.shape()[2];
-    int       upper_dims = src.shape().total_size() / (cols * rows);
-
-    fixed_point<T> coeff(info.scale_coeff(), fixed_point_position);
-    int            radius_cols = norm_size / 2;
-
-    // IN_MAP_1D and CROSS_MAP normalize over a single axis only
-    int radius_rows = (NormType::IN_MAP_2D == type) ? norm_size / 2 : 0;
-
-    if(type == NormType::CROSS_MAP)
-    {
-        // Remove also depth from upper dimensions since it is the dimension we
-        // want to use for normalization
-        upper_dims /= depth;
-
-        for(int r = 0; r < upper_dims; ++r)
-        {
-            for(int i = 0; i < rows; ++i)
-            {
-                for(int k = 0; k < cols; ++k)
-                {
-                    for(int l = 0; l < depth; ++l)
-                    {
-                        fixed_point<T> accumulated_scale(0.f, fixed_point_position);
-
-                        for(int j = -radius_cols; j <= radius_cols; ++j)
-                        {
-                            const int z = l + j;
-
-                            if(z >= 0 && z < depth)
-                            {
-                                const T              value = src[k + i * cols + z * rows * cols + r * cols * rows * depth];
-                                const fixed_point<T> fp_value(value, fixed_point_position, true);
-                                accumulated_scale = add(accumulated_scale, mul(fp_value, fp_value));
-                            }
-                        }
-
-                        accumulated_scale                                             = add(kappa, mul(accumulated_scale, coeff));
-                        dst[k + i * cols + l * rows * cols + r * cols * rows * depth] = accumulated_scale.raw();
-                    }
-                }
-            }
-        }
-    }
-    else
-    {
-        for(int r = 0; r < upper_dims; ++r)
-        {
-            for(int i = 0; i < rows; ++i)
-            {
-                for(int k = 0; k < cols; ++k)
-                {
-                    fixed_point<T> accumulated_scale(0.f, fixed_point_position);
-
-                    for(int j = -radius_rows; j <= radius_rows; ++j)
-                    {
-                        const int y = i + j;
-
-                        for(int l = -radius_cols; l <= radius_cols; ++l)
-                        {
-                            const int x = k + l;
-
-                            if((x >= 0 && y >= 0) && (x < cols && y < rows))
-                            {
-                                const T              value = src[x + y * cols + r * cols * rows];
-                                const fixed_point<T> fp_value(value, fixed_point_position, true);
-                                accumulated_scale = add(accumulated_scale, mul(fp_value, fp_value));
-                            }
-                        }
-                    }
-
-                    accumulated_scale                   = add(kappa, mul(accumulated_scale, coeff));
-                    dst[k + i * cols + r * cols * rows] = accumulated_scale.raw();
-                }
-            }
-        }
-    }
-
-    if(info.beta() == 1.f)
-    {
-        for(int i = 0; i < dst.num_elements(); ++i)
-        {
-            fixed_point<T> res = div(fixed_point<T>(src[i], fixed_point_position, true), fixed_point<T>(dst[i], fixed_point_position, true));
-            dst[i]             = res.raw();
-        }
-    }
-    else
-    {
-        const fixed_point<T> beta(info.beta(), fixed_point_position);
-
-        for(int i = 0; i < dst.num_elements(); ++i)
-        {
-            fixed_point<T> res = pow(fixed_point<T>(dst[i], fixed_point_position, true), beta);
-            res                = div(fixed_point<T>(src[i], fixed_point_position, true), res);
-            dst[i]             = res.raw();
-        }
-    }
-
-    return dst;
-}
-
 template SimpleTensor<float> normalization_layer(const SimpleTensor<float> &src, NormalizationLayerInfo info);
 template SimpleTensor<half> normalization_layer(const SimpleTensor<half> &src, NormalizationLayerInfo info);
 } // namespace reference
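
Note: the removed integral overload computed the same LRN formula as the floating-point path that remains: scale = kappa + coeff * sum(x^2) over the window, then dst = src / scale^beta, with beta == 1 short-circuiting to a plain division. A hedged, self-contained sketch of that per-element math (normalize_element and window_values are hypothetical, and window gathering is omitted):

    #include <cmath>
    #include <vector>

    // Hedged sketch of the per-element LRN math shared by both paths.
    float normalize_element(float src_value, const std::vector<float> &window_values,
                            float kappa, float coeff, float beta)
    {
        float accumulated_scale = 0.f;
        for(float v : window_values)   // cross-map or in-map neighbourhood
        {
            accumulated_scale += v * v;
        }
        const float scale = kappa + coeff * accumulated_scale;
        return src_value / std::pow(scale, beta); // beta == 1.f reduces to a division
    }
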
diff --git a/tests/validation/reference/NormalizationLayer.h b/tests/validation/reference/NormalizationLayer.h
index 3f624ff..3448baf 100644
--- a/tests/validation/reference/NormalizationLayer.h
+++ b/tests/validation/reference/NormalizationLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -38,8 +38,6 @@
 template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0>
 SimpleTensor<T> normalization_layer(const SimpleTensor<T> &src, NormalizationLayerInfo info);
 
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
-SimpleTensor<T> normalization_layer(const SimpleTensor<T> &src, NormalizationLayerInfo info);
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/PixelWiseMultiplication.cpp b/tests/validation/reference/PixelWiseMultiplication.cpp
index 7304fb0..859da5c 100644
--- a/tests/validation/reference/PixelWiseMultiplication.cpp
+++ b/tests/validation/reference/PixelWiseMultiplication.cpp
@@ -23,8 +23,6 @@
  */
 #include "PixelWiseMultiplication.h"
 
-#include "tests/validation/FixedPoint.h"
-
 namespace arm_compute
 {
 namespace test
diff --git a/tests/validation/reference/PoolingLayer.cpp b/tests/validation/reference/PoolingLayer.cpp
index e9054b9..02c430a 100644
--- a/tests/validation/reference/PoolingLayer.cpp
+++ b/tests/validation/reference/PoolingLayer.cpp
@@ -25,7 +25,6 @@
 
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 
 namespace arm_compute
@@ -44,7 +43,7 @@
     ARM_COMPUTE_ERROR_ON(info.is_global_pooling() && (src.shape().x() != src.shape().y()));
 
     // Create reference
-    SimpleTensor<T> dst{ compute_pool_shape(TensorInfo(src.shape(), 1, src.data_type(), src.fixed_point_position()), info), src.data_type(), 1 };
+    SimpleTensor<T> dst{ compute_pool_shape(TensorInfo(src.shape(), 1, src.data_type()), info), src.data_type(), 1 };
 
     const int   pool_size_x     = info.is_global_pooling() ? src.shape().x() : info.pool_size().width;
     const int   pool_size_y     = info.is_global_pooling() ? src.shape().y() : info.pool_size().height;
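
Note: the hunk above drops the fourth TensorInfo argument (the fixed-point position), so the output metadata is now derived from the three-argument constructor alone. A hedged restatement of the changed line, split out for readability:

    // Hedged sketch: dst shape comes from compute_pool_shape on metadata that
    // no longer carries a fixed-point position.
    const TensorInfo src_info(src.shape(), 1, src.data_type());
    SimpleTensor<T>  dst{ compute_pool_shape(src_info, info), src.data_type(), 1 };
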
diff --git a/tests/validation/reference/SoftmaxLayer.cpp b/tests/validation/reference/SoftmaxLayer.cpp
index ae4bcd8..aa640ad 100644
--- a/tests/validation/reference/SoftmaxLayer.cpp
+++ b/tests/validation/reference/SoftmaxLayer.cpp
@@ -24,7 +24,6 @@
 #include "SoftmaxLayer.h"
 
 #include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
 
 namespace arm_compute
 {
@@ -71,63 +70,21 @@
     return dst;
 }
 
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
+template <typename T, typename std::enable_if<std::is_same<T, uint8_t>::value, int>::type>
 SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta)
 {
-    ARM_COMPUTE_UNUSED(beta);
-
-    using namespace fixed_point_arithmetic;
-
-    // Create reference
-    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
-
-    // Compute reference
-    const int cols       = src.shape()[0];
-    const int upper_dims = src.num_elements() / cols;
-
-    for(int r = 0; r < upper_dims; ++r)
-    {
-        const T *src_row_ptr = src.data() + r * cols;
-        T       *dst_row_ptr = dst.data() + r * cols;
-
-        // Find max
-        const fixed_point<T> max(*std::max_element(src_row_ptr, src_row_ptr + cols), src.fixed_point_position(), true);
-
-        // Regularize
-        using promoted_type = fixed_point_arithmetic::traits::promote_t<T>;
-        fixed_point<promoted_type> sum(0, src.fixed_point_position(), true);
-        std::transform(src_row_ptr, src_row_ptr + cols, dst_row_ptr, [&](T val)
-        {
-            const fixed_point<T> res = exp(fixed_point<T>(val, src.fixed_point_position(), true) - max);
-            sum                      = add(sum, fixed_point<promoted_type>(res.raw(), src.fixed_point_position(), true));
-            return res.raw();
-        });
-
-        // Normalize
-        fixed_point<T> saturated_sum(sum);
-        std::transform(dst_row_ptr, dst_row_ptr + cols, dst_row_ptr, [&](T val)
-        {
-            return div(fixed_point<T>(val, src.fixed_point_position(), true), saturated_sum).raw();
-        });
-    }
-
-    return dst;
-}
-
-template <>
-SimpleTensor<uint8_t> softmax_layer<uint8_t>(const SimpleTensor<uint8_t> &src, float beta)
-{
     // Note: Output quantization info should always have scale = 1/256 and offset = 0
     const QuantizationInfo output_quantization_info = QuantizationInfo(1.f / 256, 0);
 
-    SimpleTensor<float>   src_tmp = convert_from_asymmetric(src);
-    SimpleTensor<float>   dst_tmp = softmax_layer<float>(src_tmp, beta);
-    SimpleTensor<uint8_t> dst     = convert_to_asymmetric(dst_tmp, output_quantization_info);
+    SimpleTensor<float> src_tmp = convert_from_asymmetric(src);
+    SimpleTensor<float> dst_tmp = softmax_layer<float>(src_tmp, beta);
+    SimpleTensor<T>     dst     = convert_to_asymmetric(dst_tmp, output_quantization_info);
     return dst;
 }
 
 template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src, float beta);
 template SimpleTensor<half> softmax_layer(const SimpleTensor<half> &src, float beta);
+template SimpleTensor<uint8_t> softmax_layer(const SimpleTensor<uint8_t> &src, float beta);
 } // namespace reference
 } // namespace validation
 } // namespace test
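
Note: the uint8_t specialization is generalized into a SFINAE-constrained template, but the quantized flow it implements is unchanged: dequantize, run the floating-point reference, requantize into scale 1/256 and offset 0. A hedged restatement of that body:

    // Hedged sketch of the quantized flow the constrained template implements:
    // dequantize -> float softmax -> requantize to (scale = 1/256, offset = 0).
    SimpleTensor<float>   src_f = convert_from_asymmetric(src);
    SimpleTensor<float>   dst_f = softmax_layer<float>(src_f, beta);
    SimpleTensor<uint8_t> dst   = convert_to_asymmetric(dst_f, QuantizationInfo(1.f / 256, 0));
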
diff --git a/tests/validation/reference/SoftmaxLayer.h b/tests/validation/reference/SoftmaxLayer.h
index a6d4c3b..21dca1e 100644
--- a/tests/validation/reference/SoftmaxLayer.h
+++ b/tests/validation/reference/SoftmaxLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -38,7 +38,7 @@
 template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0>
 SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta);
 
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
+template <typename T, typename std::enable_if<std::is_same<T, uint8_t>::value, int>::type = 0>
 SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta);
 } // namespace reference
 } // namespace validation
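
Note: gating the second overload on std::is_same<T, uint8_t> instead of std::is_integral means only the quantized type resolves to it now. A hedged usage sketch (q_src/f_src/s_src are hypothetical tensors):

    SimpleTensor<uint8_t> q_dst = softmax_layer(q_src, 1.f);    // quantized path
    SimpleTensor<float>   f_dst = softmax_layer(f_src, 1.f);    // floating-point path
    // SimpleTensor<int16_t> s_dst = softmax_layer(s_src, 1.f); // no longer resolves:
    // other integral types matched the old std::is_integral constraint, not this one.
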
diff --git a/tests/validation/reference/Transpose.cpp b/tests/validation/reference/Transpose.cpp
index 736f37e..348c703 100644
--- a/tests/validation/reference/Transpose.cpp
+++ b/tests/validation/reference/Transpose.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,7 +24,6 @@
 #include "Transpose.h"
 
 #include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 
 namespace arm_compute
diff --git a/tests/validation/reference/WidthConcatenateLayer.cpp b/tests/validation/reference/WidthConcatenateLayer.cpp
index 5b89934..7a5ece8 100644
--- a/tests/validation/reference/WidthConcatenateLayer.cpp
+++ b/tests/validation/reference/WidthConcatenateLayer.cpp
@@ -23,7 +23,6 @@
  */
 #include "WidthConcatenateLayer.h"
 
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 
 namespace arm_compute