COMPMID-970: Remove QS8/QS16 support

Removed fixed-point position arguments from the test sources.

Change-Id: I8343724723b71611fd501ed34de0866d3fb60e7e
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/136382
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
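
For reviewers, a minimal before/after sketch of the create_tensor call sites
this patch rewrites throughout the test sources (shape, channel count, and
quantization values are illustrative, copied from one of the hunks below):

    // Before: a fixed point position sat between the channel count and the
    // QuantizationInfo (3 for the removed QS8/QS16 types, 0 otherwise).
    //   CLTensor src = create_tensor<CLTensor>(input_shape, data_type, 1,
    //                                          fixed_point_position,
    //                                          QuantizationInfo(2.f / 255.f, 127));

    // After: the argument is dropped; QuantizationInfo (and, where used,
    // DataLayout) follow the channel count directly.
    CLTensor src = create_tensor<CLTensor>(input_shape, data_type, 1,
                                           QuantizationInfo(2.f / 255.f, 127));

The *FixedPointFixture aliases and QS-only datasets that exercised the old
signature are deleted alongside it.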
diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp
index f122f6d..4f97d7b 100644
--- a/tests/validation/CL/ActivationLayer.cpp
+++ b/tests/validation/CL/ActivationLayer.cpp
@@ -221,9 +221,6 @@
 TEST_SUITE_END()
 
 template <typename T>
-using CLActivationLayerFixedPointFixture = ActivationValidationFixedPointFixture<CLTensor, CLAccessor, CLActivationLayer, T>;
-
-template <typename T>
 using CLActivationLayerQuantizedFixture = ActivationValidationQuantizedFixture<CLTensor, CLAccessor, CLActivationLayer, T>;
 
 /** Input data sets. */
diff --git a/tests/validation/CL/ArithmeticAddition.cpp b/tests/validation/CL/ArithmeticAddition.cpp
index 0646c05..256d93f 100644
--- a/tests/validation/CL/ArithmeticAddition.cpp
+++ b/tests/validation/CL/ArithmeticAddition.cpp
@@ -202,9 +202,6 @@
 }
 TEST_SUITE_END()
 
-template <typename T>
-using CLArithmeticAdditionFixedPointFixture = ArithmeticAdditionValidationFixedPointFixture<CLTensor, CLAccessor, CLArithmeticAddition, T>;
-
 TEST_SUITE(Float)
 TEST_SUITE(FP16)
 FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP16Dataset),
diff --git a/tests/validation/CL/ArithmeticSubtraction.cpp b/tests/validation/CL/ArithmeticSubtraction.cpp
index 4ba5387..b19d963 100644
--- a/tests/validation/CL/ArithmeticSubtraction.cpp
+++ b/tests/validation/CL/ArithmeticSubtraction.cpp
@@ -231,9 +231,6 @@
 TEST_SUITE_END()
 TEST_SUITE_END()
 
-template <typename T1, typename T2 = T1, typename T3 = T1>
-using CLArithmeticSubtractionFixedPointFixture = ArithmeticSubtractionValidationFixedPointFixture<CLTensor, CLAccessor, CLArithmeticSubtraction, T1, T2, T3>;
-
 TEST_SUITE(Float)
 TEST_SUITE(FP16)
 FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ArithmeticSubtractionFP16Dataset),
diff --git a/tests/validation/CL/BatchNormalizationLayer.cpp b/tests/validation/CL/BatchNormalizationLayer.cpp
index de775bf..0d80ff7 100644
--- a/tests/validation/CL/BatchNormalizationLayer.cpp
+++ b/tests/validation/CL/BatchNormalizationLayer.cpp
@@ -67,9 +67,6 @@
                                                                    framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                shape0, shape1, epsilon, use_gamma, use_beta, dt, data_layout)
 {
-    // Set fixed point position data type allowed
-    const int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
-
     TensorShape src_dst_shapes = shape0;
     if(data_layout == DataLayout::NHWC)
     {
@@ -77,12 +74,12 @@
     }
 
     // Create tensors
-    CLTensor src   = create_tensor<CLTensor>(src_dst_shapes, dt, 1, fixed_point_position, QuantizationInfo(), data_layout);
-    CLTensor dst   = create_tensor<CLTensor>(src_dst_shapes, dt, 1, fixed_point_position, QuantizationInfo(), data_layout);
-    CLTensor mean  = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
-    CLTensor var   = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
-    CLTensor beta  = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
-    CLTensor gamma = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
+    CLTensor src   = create_tensor<CLTensor>(src_dst_shapes, dt, 1, QuantizationInfo(), data_layout);
+    CLTensor dst   = create_tensor<CLTensor>(src_dst_shapes, dt, 1, QuantizationInfo(), data_layout);
+    CLTensor mean  = create_tensor<CLTensor>(shape1, dt, 1);
+    CLTensor var   = create_tensor<CLTensor>(shape1, dt, 1);
+    CLTensor beta  = create_tensor<CLTensor>(shape1, dt, 1);
+    CLTensor gamma = create_tensor<CLTensor>(shape1, dt, 1);
 
     // Create and Configure function
     CLBatchNormalizationLayer norm;
diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp
index 7fd29f4..30dd850 100644
--- a/tests/validation/CL/ConvolutionLayer.cpp
+++ b/tests/validation/CL/ConvolutionLayer.cpp
@@ -153,16 +153,13 @@
                                                                    ActivationFunctionsDataset),
                input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
 {
-    // Set fixed point position data type allowed
-    int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
     auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
 
     // Create tensors
-    CLTensor src     = create_tensor<CLTensor>(input_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
-    CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
-    CLTensor bias    = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
-    CLTensor dst     = create_tensor<CLTensor>(output_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
+    CLTensor src     = create_tensor<CLTensor>(input_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+    CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+    CLTensor bias    = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+    CLTensor dst     = create_tensor<CLTensor>(output_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
 
     ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -251,9 +248,6 @@
 TEST_SUITE_END()
 
 template <typename T>
-using CLGEMMConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
-
-template <typename T>
 using CLGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
 
 const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
diff --git a/tests/validation/CL/DepthConvertLayer.cpp b/tests/validation/CL/DepthConvertLayer.cpp
index c6e9f75..ed1f54c 100644
--- a/tests/validation/CL/DepthConvertLayer.cpp
+++ b/tests/validation/CL/DepthConvertLayer.cpp
@@ -67,19 +67,15 @@
 using CLDepthConvertLayerToU8Fixture = DepthConvertLayerValidationFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, uint8_t>;
 template <typename T>
 using CLDepthConvertLayerToU32Fixture = DepthConvertLayerValidationFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, uint32_t>;
-template <typename T>
-using CLDepthConvertLayerToFP32FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, float>;
 
 TEST_SUITE(U8_to_U16)
 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
                                                                    DepthConvertLayerShiftDataset),
                shape, policy, shift)
 {
-    int fixed_point_position = 0;
-
     // Create tensors
-    CLTensor src = create_tensor<CLTensor>(shape, DataType::U8, 1, fixed_point_position);
-    CLTensor dst = create_tensor<CLTensor>(shape, DataType::U16, 1, fixed_point_position);
+    CLTensor src = create_tensor<CLTensor>(shape, DataType::U8, 1);
+    CLTensor dst = create_tensor<CLTensor>(shape, DataType::U16, 1);
 
     // Create and Configure function
     CLDepthConvertLayer depth_convert;
@@ -117,11 +113,9 @@
                                                                    DepthConvertLayerShiftDataset),
                shape, policy, shift)
 {
-    int fixed_point_position = 0;
-
     // Create tensors
-    CLTensor src = create_tensor<CLTensor>(shape, DataType::U8, 1, fixed_point_position);
-    CLTensor dst = create_tensor<CLTensor>(shape, DataType::S16, 1, fixed_point_position);
+    CLTensor src = create_tensor<CLTensor>(shape, DataType::U8, 1);
+    CLTensor dst = create_tensor<CLTensor>(shape, DataType::S16, 1);
 
     // Create and Configure function
     CLDepthConvertLayer depth_convert;
@@ -158,11 +152,9 @@
                                                                    DepthConvertLayerShiftDataset),
                shape, policy, shift)
 {
-    int fixed_point_position = 0;
-
     // Create tensors
-    CLTensor src = create_tensor<CLTensor>(shape, DataType::U8, 1, fixed_point_position);
-    CLTensor dst = create_tensor<CLTensor>(shape, DataType::S32, 1, fixed_point_position);
+    CLTensor src = create_tensor<CLTensor>(shape, DataType::U8, 1);
+    CLTensor dst = create_tensor<CLTensor>(shape, DataType::S32, 1);
 
     // Create and Configure function
     CLDepthConvertLayer depth_convert;
@@ -200,11 +192,9 @@
                                                                    DepthConvertLayerShiftDataset),
                shape, policy, shift)
 {
-    int fixed_point_position = 0;
-
     // Create tensors
-    CLTensor src = create_tensor<CLTensor>(shape, DataType::U16, 1, fixed_point_position);
-    CLTensor dst = create_tensor<CLTensor>(shape, DataType::U8, 1, fixed_point_position);
+    CLTensor src = create_tensor<CLTensor>(shape, DataType::U16, 1);
+    CLTensor dst = create_tensor<CLTensor>(shape, DataType::U8, 1);
 
     // Create and Configure function
     CLDepthConvertLayer depth_convert;
@@ -241,11 +231,9 @@
                                                                    DepthConvertLayerShiftDataset),
                shape, policy, shift)
 {
-    int fixed_point_position = 0;
-
     // Create tensors
-    CLTensor src = create_tensor<CLTensor>(shape, DataType::U16, 1, fixed_point_position);
-    CLTensor dst = create_tensor<CLTensor>(shape, DataType::U32, 1, fixed_point_position);
+    CLTensor src = create_tensor<CLTensor>(shape, DataType::U16, 1);
+    CLTensor dst = create_tensor<CLTensor>(shape, DataType::U32, 1);
 
     // Create and Configure function
     CLDepthConvertLayer depth_convert;
@@ -282,11 +270,9 @@
                                                                    DepthConvertLayerShiftDataset),
                shape, policy, shift)
 {
-    int fixed_point_position = 0;
-
     // Create tensors
-    CLTensor src = create_tensor<CLTensor>(shape, DataType::S16, 1, fixed_point_position);
-    CLTensor dst = create_tensor<CLTensor>(shape, DataType::U8, 1, fixed_point_position);
+    CLTensor src = create_tensor<CLTensor>(shape, DataType::S16, 1);
+    CLTensor dst = create_tensor<CLTensor>(shape, DataType::U8, 1);
 
     // Create and Configure function
     CLDepthConvertLayer depth_convert;
@@ -323,11 +309,9 @@
                                                                    DepthConvertLayerShiftDataset),
                shape, policy, shift)
 {
-    int fixed_point_position = 0;
-
     // Create tensors
-    CLTensor src = create_tensor<CLTensor>(shape, DataType::S16, 1, fixed_point_position);
-    CLTensor dst = create_tensor<CLTensor>(shape, DataType::S32, 1, fixed_point_position);
+    CLTensor src = create_tensor<CLTensor>(shape, DataType::S16, 1);
+    CLTensor dst = create_tensor<CLTensor>(shape, DataType::S32, 1);
 
     // Create and Configure function
     CLDepthConvertLayer depth_convert;
diff --git a/tests/validation/CL/DilatedConvolutionLayer.cpp b/tests/validation/CL/DilatedConvolutionLayer.cpp
index 4b22390..fdd6cc8 100644
--- a/tests/validation/CL/DilatedConvolutionLayer.cpp
+++ b/tests/validation/CL/DilatedConvolutionLayer.cpp
@@ -114,16 +114,13 @@
                                                                    CNNDataTypes),
                input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type)
 {
-    // Set fixed point position data type allowed
-    int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
     auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
 
     // Create tensors
-    CLTensor src     = create_tensor<CLTensor>(input_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
-    CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
-    CLTensor bias    = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
-    CLTensor dst     = create_tensor<CLTensor>(output_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
+    CLTensor src     = create_tensor<CLTensor>(input_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+    CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+    CLTensor bias    = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+    CLTensor dst     = create_tensor<CLTensor>(output_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
 
     ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -204,9 +201,6 @@
 TEST_SUITE_END()
 
 template <typename T>
-using CLGEMMDilatedConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
-
-template <typename T>
 using CLGEMMDilatedConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
 
 TEST_SUITE(Quantized)
diff --git a/tests/validation/CL/DirectConvolutionLayer.cpp b/tests/validation/CL/DirectConvolutionLayer.cpp
index d8b2d7e..a796b6e 100644
--- a/tests/validation/CL/DirectConvolutionLayer.cpp
+++ b/tests/validation/CL/DirectConvolutionLayer.cpp
@@ -200,9 +200,6 @@
 TEST_SUITE_END()
 
 template <typename T>
-using CLDirectConvolutionLayerFixedPointFixture = DirectConvolutionValidationFixedPointFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T>;
-
-template <typename T>
 using CLDirectConvolutionLayerQuantizedFixture = DirectConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T>;
 template <typename T>
 using CLDirectConvolutionValidationWithTensorShapesQuantizedFixture = DirectConvolutionValidationWithTensorShapesQuantizedFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T>;
diff --git a/tests/validation/CL/FullyConnectedLayer.cpp b/tests/validation/CL/FullyConnectedLayer.cpp
index 069d8a7..9958a88 100644
--- a/tests/validation/CL/FullyConnectedLayer.cpp
+++ b/tests/validation/CL/FullyConnectedLayer.cpp
@@ -69,10 +69,8 @@
                                                                    CNNDataTypes),
                src_shape, weights_shape, bias_shape, dst_shape, transpose_weights, reshape_weights, data_type)
 {
-    // Set fixed point position data type allowed
-    const int              fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-    const DataType         bias_data_type       = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
-    const QuantizationInfo quantization_info    = is_data_type_quantized_asymmetric(data_type) ? QuantizationInfo(2.f / 255.f, 127) : QuantizationInfo();
+    const DataType         bias_data_type    = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
+    const QuantizationInfo quantization_info = is_data_type_quantized_asymmetric(data_type) ? QuantizationInfo(2.f / 255.f, 127) : QuantizationInfo();
 
     TensorShape ws(weights_shape);
 
@@ -85,10 +83,10 @@
     }
 
     // Create tensors
-    CLTensor src     = create_tensor<CLTensor>(src_shape, data_type, 1, fixed_point_position, quantization_info);
-    CLTensor weights = create_tensor<CLTensor>(ws, data_type, 1, fixed_point_position, quantization_info);
-    CLTensor bias    = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, fixed_point_position, quantization_info);
-    CLTensor dst     = create_tensor<CLTensor>(dst_shape, data_type, 1, fixed_point_position, quantization_info);
+    CLTensor src     = create_tensor<CLTensor>(src_shape, data_type, 1, quantization_info);
+    CLTensor weights = create_tensor<CLTensor>(ws, data_type, 1, quantization_info);
+    CLTensor bias    = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, quantization_info);
+    CLTensor dst     = create_tensor<CLTensor>(dst_shape, data_type, 1, quantization_info);
 
     ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -192,9 +190,6 @@
 TEST_SUITE_END()
 
 template <typename T>
-using CLFullyConnectedLayerFixedPointFixture = FullyConnectedLayerValidationFixedPointFixture<CLTensor, CLAccessor, CLFullyConnectedLayer, T, false>;
-
-template <typename T>
 using CLFullyConnectedLayerQuantizedFixture = FullyConnectedLayerValidationQuantizedFixture<CLTensor, CLAccessor, CLFullyConnectedLayer, T, false>;
 
 TEST_SUITE(Quantized)
diff --git a/tests/validation/CL/GEMM.cpp b/tests/validation/CL/GEMM.cpp
index d066281..6391820 100644
--- a/tests/validation/CL/GEMM.cpp
+++ b/tests/validation/CL/GEMM.cpp
@@ -86,14 +86,11 @@
 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallGEMMDataset(), datasets::LargeGEMMDataset()), CNNDataTypes),
                shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type)
 {
-    // Set fixed point position data type allowed
-    const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
     // Create tensors
-    CLTensor a   = create_tensor<CLTensor>(shape_a, data_type, 1, fixed_point_position);
-    CLTensor b   = create_tensor<CLTensor>(shape_b, data_type, 1, fixed_point_position);
-    CLTensor c   = create_tensor<CLTensor>(shape_c, data_type, 1, fixed_point_position);
-    CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1, fixed_point_position);
+    CLTensor a   = create_tensor<CLTensor>(shape_a, data_type, 1);
+    CLTensor b   = create_tensor<CLTensor>(shape_b, data_type, 1);
+    CLTensor c   = create_tensor<CLTensor>(shape_c, data_type, 1);
+    CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1);
 
     ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -152,9 +149,6 @@
 TEST_SUITE_END()
 TEST_SUITE_END()
 
-template <typename T>
-using CLGEMMFixedPointFixture = GEMMValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMM, T>;
-
 TEST_SUITE(OUTPUT_3D)
 TEST_SUITE(Float)
 TEST_SUITE(FP32)
diff --git a/tests/validation/CL/NormalizationLayer.cpp b/tests/validation/CL/NormalizationLayer.cpp
index f6a8e7a..a2dbaff 100644
--- a/tests/validation/CL/NormalizationLayer.cpp
+++ b/tests/validation/CL/NormalizationLayer.cpp
@@ -52,10 +52,6 @@
                                                           framework::dataset::make("NormalizationSize", 3, 9, 2)),
                                                   framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })),
                                           framework::dataset::make("IsScaled", { true }));
-const auto NormalizationDatasetQS = combine(combine(combine(combine(datasets::TinyShapes(), datasets::NormalizationTypes()),
-                                                            framework::dataset::make("NormalizationSize", 3, 9, 2)),
-                                                    framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })),
-                                            framework::dataset::make("IsScaled", { true }));
 const auto NormalizationDatasetFP16 = combine(combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("NormType", { NormType::IN_MAP_1D, NormType::CROSS_MAP })),
                                                               framework::dataset::make("NormalizationSize", 3, 9, 2)),
                                                       framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })),
@@ -135,9 +131,6 @@
 TEST_SUITE_END()
 TEST_SUITE_END()
 
-template <typename T>
-using CLNormalizationLayerFixedPointFixture = NormalizationValidationFixedPointFixture<CLTensor, CLAccessor, CLNormalizationLayer, T>;
-
 TEST_SUITE_END()
 TEST_SUITE_END()
 } // namespace validation
diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp
index b28a5eb..0b8a11f 100644
--- a/tests/validation/CL/PoolingLayer.cpp
+++ b/tests/validation/CL/PoolingLayer.cpp
@@ -148,9 +148,6 @@
 TEST_SUITE_END() // FP16
 TEST_SUITE_END() // Float
 
-template <typename T>
-using CLPoolingLayerFixedPointFixture = PoolingLayerValidationFixedPointFixture<CLTensor, CLAccessor, CLPoolingLayer, T>;
-
 TEST_SUITE(Quantized)
 
 template <typename T>
diff --git a/tests/validation/CL/SYSTEM/AlexNet.cpp b/tests/validation/CL/SYSTEM/AlexNet.cpp
index 75f8d19..9be6f2c 100644
--- a/tests/validation/CL/SYSTEM/AlexNet.cpp
+++ b/tests/validation/CL/SYSTEM/AlexNet.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -79,7 +79,7 @@
                                             "cnn_data/alexnet_model/fc8_b.npy"
                                           };
     CLAlexNetModel network{};
-    network.init(dt, 4, batches);
+    network.init(dt, batches);
     network.build();
     network.allocate();
     network.fill(weight_files, bias_files);
diff --git a/tests/validation/CL/SoftmaxLayer.cpp b/tests/validation/CL/SoftmaxLayer.cpp
index b47f84f..66ca0b8 100644
--- a/tests/validation/CL/SoftmaxLayer.cpp
+++ b/tests/validation/CL/SoftmaxLayer.cpp
@@ -64,13 +64,11 @@
 
 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(concat(datasets::SoftmaxLayerSmallShapes(), datasets::SoftmaxLayerLargeShapes()), CNNDataTypes), shape, data_type)
 {
-    // Set fixed point position and quantization info if is allowed
-    const int              fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-    const QuantizationInfo quantization_info    = is_data_type_quantized_asymmetric(data_type) ? QuantizationInfo(1.f / 255.f, 0) : QuantizationInfo();
+    const QuantizationInfo quantization_info = is_data_type_quantized_asymmetric(data_type) ? QuantizationInfo(1.f / 255.f, 0) : QuantizationInfo();
 
     // Create tensors
-    CLTensor src = create_tensor<CLTensor>(shape, data_type, 1, fixed_point_position, quantization_info);
-    CLTensor dst = create_tensor<CLTensor>(shape, data_type, 1, fixed_point_position, QuantizationInfo(1.f / 256.f, 0));
+    CLTensor src = create_tensor<CLTensor>(shape, data_type, 1, quantization_info);
+    CLTensor dst = create_tensor<CLTensor>(shape, data_type, 1, QuantizationInfo(1.f / 256.f, 0));
 
     ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -167,9 +165,6 @@
 TEST_SUITE_END()
 
 template <typename T>
-using CLSoftmaxLayerFixedPointFixture = SoftmaxValidationFixedPointFixture<CLTensor, CLAccessor, CLSoftmaxLayer, T>;
-
-template <typename T>
 using CLSoftmaxLayerQuantizedFixture = SoftmaxValidationQuantizedFixture<CLTensor, CLAccessor, CLSoftmaxLayer, T>;
 
 TEST_SUITE(Quantized)
diff --git a/tests/validation/CL/Winograd.cpp b/tests/validation/CL/Winograd.cpp
index f68ec8c..501afac 100644
--- a/tests/validation/CL/Winograd.cpp
+++ b/tests/validation/CL/Winograd.cpp
@@ -169,7 +169,7 @@
     TensorShape shape_out = compute_winograd_input_transform_shape(tensor_info_in, winograd_info);
 
     // Create tensors
-    CLTensor in  = create_tensor<CLTensor>(shape_in, data_type, 1, 0, QuantizationInfo(), data_layout);
+    CLTensor in  = create_tensor<CLTensor>(shape_in, data_type, 1, QuantizationInfo(), data_layout);
     CLTensor out = create_tensor<CLTensor>(shape_out, data_type);
 
     ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -216,7 +216,7 @@
     TensorShape shape_out = compute_winograd_input_transform_shape(tensor_info_in, winograd_info);
 
     // Create tensors
-    CLTensor in  = create_tensor<CLTensor>(shape_in_nhwc, data_type, 1, 0, QuantizationInfo(), data_layout);
+    CLTensor in  = create_tensor<CLTensor>(shape_in_nhwc, data_type, 1, QuantizationInfo(), data_layout);
     CLTensor out = create_tensor<CLTensor>(shape_out, data_type);
 
     ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -296,8 +296,8 @@
     TensorShape shape_b = compute_winograd_filter_transform_shape(TensorInfo(shape_a, 1, data_type), winograd_info);
 
     // Create tensors
-    CLTensor a = create_tensor<CLTensor>(shape_a, data_type, 1, 0, QuantizationInfo(), data_layout);
-    CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, 0, QuantizationInfo(), data_layout);
+    CLTensor a = create_tensor<CLTensor>(shape_a, data_type, 1, QuantizationInfo(), data_layout);
+    CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, QuantizationInfo(), data_layout);
 
     ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -348,8 +348,8 @@
     TensorShape shape_b = compute_winograd_filter_transform_shape(tensor_info_in, winograd_info);
 
     // Create tensors
-    CLTensor a = create_tensor<CLTensor>(shape_in_nhwc, data_type, 1, 0, QuantizationInfo(), data_layout);
-    CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, 0, QuantizationInfo(), data_layout);
+    CLTensor a = create_tensor<CLTensor>(shape_in_nhwc, data_type, 1, QuantizationInfo(), data_layout);
+    CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, QuantizationInfo(), data_layout);
 
     ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -444,7 +444,7 @@
 
     // Create tensors
     CLTensor a = create_tensor<CLTensor>(shape_a, data_type);
-    CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, 0, QuantizationInfo(), winograd_info.output_data_layout);
+    CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, QuantizationInfo(), winograd_info.output_data_layout);
 
     ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -481,7 +481,7 @@
 
     // Create tensors
     CLTensor a = create_tensor<CLTensor>(shape_a, data_type);
-    CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, 0, QuantizationInfo(), winograd_info.output_data_layout);
+    CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, QuantizationInfo(), winograd_info.output_data_layout);
 
     ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
diff --git a/tests/validation/GLES_COMPUTE/ActivationLayer.cpp b/tests/validation/GLES_COMPUTE/ActivationLayer.cpp
index 23821d3..a8c7253 100644
--- a/tests/validation/GLES_COMPUTE/ActivationLayer.cpp
+++ b/tests/validation/GLES_COMPUTE/ActivationLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -112,12 +112,9 @@
 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(concat(datasets::SmallShapes(), datasets::LargeShapes()), CNNDataTypes), framework::dataset::make("InPlace", { false, true })),
                shape, data_type, in_place)
 {
-    // Set fixed point position data type allowed
-    const int fixed_point_position = 0;
-
     // Create tensors
-    GCTensor src = create_tensor<GCTensor>(shape, data_type, 1, fixed_point_position);
-    GCTensor dst = create_tensor<GCTensor>(shape, data_type, 1, fixed_point_position);
+    GCTensor src = create_tensor<GCTensor>(shape, data_type, 1);
+    GCTensor dst = create_tensor<GCTensor>(shape, data_type, 1);
 
     ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
diff --git a/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp b/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp
index d22f1e9..3a3d1d7 100644
--- a/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp
+++ b/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp
@@ -67,9 +67,6 @@
                                                                    framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                shape0, shape1, epsilon, use_beta, use_gamma, dt, data_layout)
 {
-    // Set fixed point position data type allowed
-    int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
-
     TensorShape src_dst_shapes = shape0;
     if(data_layout == DataLayout::NHWC)
     {
@@ -77,12 +74,12 @@
     }
 
     // Create tensors
-    GCTensor src   = create_tensor<GCTensor>(src_dst_shapes, dt, 1, fixed_point_position, QuantizationInfo(), data_layout);
-    GCTensor dst   = create_tensor<GCTensor>(src_dst_shapes, dt, 1, fixed_point_position, QuantizationInfo(), data_layout);
-    GCTensor mean  = create_tensor<GCTensor>(shape1, dt, 1, fixed_point_position);
-    GCTensor var   = create_tensor<GCTensor>(shape1, dt, 1, fixed_point_position);
-    GCTensor beta  = create_tensor<GCTensor>(shape1, dt, 1, fixed_point_position);
-    GCTensor gamma = create_tensor<GCTensor>(shape1, dt, 1, fixed_point_position);
+    GCTensor src   = create_tensor<GCTensor>(src_dst_shapes, dt, 1, QuantizationInfo(), data_layout);
+    GCTensor dst   = create_tensor<GCTensor>(src_dst_shapes, dt, 1, QuantizationInfo(), data_layout);
+    GCTensor mean  = create_tensor<GCTensor>(shape1, dt, 1);
+    GCTensor var   = create_tensor<GCTensor>(shape1, dt, 1);
+    GCTensor beta  = create_tensor<GCTensor>(shape1, dt, 1);
+    GCTensor gamma = create_tensor<GCTensor>(shape1, dt, 1);
 
     // Create and Configure function
     GCBatchNormalizationLayer norm;
diff --git a/tests/validation/GLES_COMPUTE/ConvolutionLayer.cpp b/tests/validation/GLES_COMPUTE/ConvolutionLayer.cpp
index 0f81512..2961dc9 100644
--- a/tests/validation/GLES_COMPUTE/ConvolutionLayer.cpp
+++ b/tests/validation/GLES_COMPUTE/ConvolutionLayer.cpp
@@ -70,16 +70,13 @@
                                                                    ActivationFunctionsDataset),
                input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
 {
-    // Set fixed point position data type allowed
-    int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
     auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
 
     // Create tensors
-    GCTensor src     = create_tensor<GCTensor>(input_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
-    GCTensor weights = create_tensor<GCTensor>(weights_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
-    GCTensor bias    = create_tensor<GCTensor>(bias_shape, bias_data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
-    GCTensor dst     = create_tensor<GCTensor>(output_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
+    GCTensor src     = create_tensor<GCTensor>(input_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+    GCTensor weights = create_tensor<GCTensor>(weights_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+    GCTensor bias    = create_tensor<GCTensor>(bias_shape, bias_data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+    GCTensor dst     = create_tensor<GCTensor>(output_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
 
     ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
diff --git a/tests/validation/GLES_COMPUTE/FullyConnectedLayer.cpp b/tests/validation/GLES_COMPUTE/FullyConnectedLayer.cpp
index 4040f46..49716dc 100644
--- a/tests/validation/GLES_COMPUTE/FullyConnectedLayer.cpp
+++ b/tests/validation/GLES_COMPUTE/FullyConnectedLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -66,9 +66,6 @@
                                                                    CNNDataTypes),
                src_shape, weights_shape, bias_shape, dst_shape, transpose_weights, reshape_weights, data_type)
 {
-    // Set fixed point position data type allowed
-    int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
     TensorShape ws(weights_shape);
 
     // Transpose weights if not done in the function
@@ -80,10 +77,10 @@
     }
 
     // Create tensors
-    GCTensor src     = create_tensor<GCTensor>(src_shape, data_type, 1, fixed_point_position);
-    GCTensor weights = create_tensor<GCTensor>(ws, data_type, 1, fixed_point_position);
-    GCTensor bias    = create_tensor<GCTensor>(bias_shape, data_type, 1, fixed_point_position);
-    GCTensor dst     = create_tensor<GCTensor>(dst_shape, data_type, 1, fixed_point_position);
+    GCTensor src     = create_tensor<GCTensor>(src_shape, data_type, 1);
+    GCTensor weights = create_tensor<GCTensor>(ws, data_type, 1);
+    GCTensor bias    = create_tensor<GCTensor>(bias_shape, data_type, 1);
+    GCTensor dst     = create_tensor<GCTensor>(dst_shape, data_type, 1);
 
     ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
diff --git a/tests/validation/GLES_COMPUTE/GEMM.cpp b/tests/validation/GLES_COMPUTE/GEMM.cpp
index 2abad32..6417143 100644
--- a/tests/validation/GLES_COMPUTE/GEMM.cpp
+++ b/tests/validation/GLES_COMPUTE/GEMM.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -59,14 +59,11 @@
 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallGEMMDataset(), datasets::LargeGEMMDataset()), CNNDataTypes),
                shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type)
 {
-    // Set fixed point position data type allowed
-    const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
     // Create tensors
-    GCTensor a   = create_tensor<GCTensor>(shape_a, data_type, 1, fixed_point_position);
-    GCTensor b   = create_tensor<GCTensor>(shape_b, data_type, 1, fixed_point_position);
-    GCTensor c   = create_tensor<GCTensor>(shape_c, data_type, 1, fixed_point_position);
-    GCTensor dst = create_tensor<GCTensor>(output_shape, data_type, 1, fixed_point_position);
+    GCTensor a   = create_tensor<GCTensor>(shape_a, data_type, 1);
+    GCTensor b   = create_tensor<GCTensor>(shape_b, data_type, 1);
+    GCTensor c   = create_tensor<GCTensor>(shape_c, data_type, 1);
+    GCTensor dst = create_tensor<GCTensor>(output_shape, data_type, 1);
 
     ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
diff --git a/tests/validation/GLES_COMPUTE/SoftmaxLayer.cpp b/tests/validation/GLES_COMPUTE/SoftmaxLayer.cpp
index 2c28141..abc277a 100644
--- a/tests/validation/GLES_COMPUTE/SoftmaxLayer.cpp
+++ b/tests/validation/GLES_COMPUTE/SoftmaxLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -59,12 +59,9 @@
 
 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(concat(datasets::SoftmaxLayerSmallShapes(), datasets::SoftmaxLayerLargeShapes()), CNNDataTypes), shape, data_type)
 {
-    // Set fixed point position data type allowed
-    const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
     // Create tensors
-    GCTensor src = create_tensor<GCTensor>(shape, data_type, 1, fixed_point_position);
-    GCTensor dst = create_tensor<GCTensor>(shape, data_type, 1, fixed_point_position);
+    GCTensor src = create_tensor<GCTensor>(shape, data_type, 1);
+    GCTensor dst = create_tensor<GCTensor>(shape, data_type, 1);
 
     ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
diff --git a/tests/validation/Helpers.cpp b/tests/validation/Helpers.cpp
index ff69b1c..8832fce 100644
--- a/tests/validation/Helpers.cpp
+++ b/tests/validation/Helpers.cpp
@@ -150,7 +150,7 @@
 SimpleTensor<float> convert_from_asymmetric(const SimpleTensor<uint8_t> &src)
 {
     const QuantizationInfo &quantization_info = src.quantization_info();
-    SimpleTensor<float>     dst{ src.shape(), DataType::F32, 1, 0, QuantizationInfo(), src.data_layout() };
+    SimpleTensor<float>     dst{ src.shape(), DataType::F32, 1, QuantizationInfo(), src.data_layout() };
 
     for(int i = 0; i < src.num_elements(); ++i)
     {
@@ -161,7 +161,7 @@
 
 SimpleTensor<uint8_t> convert_to_asymmetric(const SimpleTensor<float> &src, const QuantizationInfo &quantization_info)
 {
-    SimpleTensor<uint8_t> dst{ src.shape(), DataType::QASYMM8, 1, 0, quantization_info };
+    SimpleTensor<uint8_t> dst{ src.shape(), DataType::QASYMM8, 1, quantization_info };
     for(int i = 0; i < src.num_elements(); ++i)
     {
         dst[i] = quantization_info.quantize(src[i], RoundingPolicy::TO_NEAREST_UP);
diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
index 88262d5..2b4d277 100644
--- a/tests/validation/Helpers.h
+++ b/tests/validation/Helpers.h
@@ -52,14 +52,13 @@
 
 /** Helper function to get the testing range for each activation layer.
  *
- * @param[in] activation           Activation function to test.
- * @param[in] data_type            Data type.
- * @param[in] fixed_point_position Number of bits for the fractional part. Defaults to 1.
+ * @param[in] activation Activation function to test.
+ * @param[in] data_type  Data type.
  *
  * @return A pair containing the lower upper testing bounds for a given function.
  */
 template <typename T>
-std::pair<T, T> get_activation_layer_test_bounds(ActivationLayerInfo::ActivationFunction activation, DataType data_type, int fixed_point_position = 0)
+std::pair<T, T> get_activation_layer_test_bounds(ActivationLayerInfo::ActivationFunction activation, DataType data_type)
 {
     std::pair<T, T> bounds;
 
@@ -178,12 +177,12 @@
 
 /** Helper function to get the testing range for batch normalization layer.
  *
- * @param[in] fixed_point_position (Optional) Number of bits for the fractional part. Defaults to 1.
+ * @param[in] fixed_point_position (Optional) Number of bits for the fractional part. Defaults to 0.
  *
  * @return A pair containing the lower upper testing bounds.
  */
 template <typename T>
-std::pair<T, T> get_batchnormalization_layer_test_bounds(int fixed_point_position = 1)
+std::pair<T, T> get_batchnormalization_layer_test_bounds(int fixed_point_position = 0)
 {
     const bool is_float = std::is_floating_point<T>::value;
     std::pair<T, T> bounds;
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp
index 289ca48..dee264c 100644
--- a/tests/validation/NEON/ActivationLayer.cpp
+++ b/tests/validation/NEON/ActivationLayer.cpp
@@ -90,12 +90,9 @@
 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(concat(datasets::SmallShapes(), datasets::LargeShapes()), CNNDataTypes), framework::dataset::make("InPlace", { false, true })),
                shape, data_type, in_place)
 {
-    // Set fixed point position data type allowed
-    const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
     // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, data_type, 1, fixed_point_position);
-    Tensor dst = create_tensor<Tensor>(shape, data_type, 1, fixed_point_position);
+    Tensor src = create_tensor<Tensor>(shape, data_type, 1);
+    Tensor dst = create_tensor<Tensor>(shape, data_type, 1);
 
     ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -196,9 +193,6 @@
 TEST_SUITE_END()
 
 template <typename T>
-using NEActivationLayerFixedPointFixture = ActivationValidationFixedPointFixture<Tensor, Accessor, NEActivationLayer, T>;
-
-template <typename T>
 using NEActivationLayerQuantizedFixture = ActivationValidationQuantizedFixture<Tensor, Accessor, NEActivationLayer, T>;
 
 /** Input data sets. */
diff --git a/tests/validation/NEON/ArithmeticAddition.cpp b/tests/validation/NEON/ArithmeticAddition.cpp
index b01e5d9..3632c3c 100644
--- a/tests/validation/NEON/ArithmeticAddition.cpp
+++ b/tests/validation/NEON/ArithmeticAddition.cpp
@@ -163,9 +163,6 @@
 }
 TEST_SUITE_END()
 
-template <typename T>
-using NEArithmeticAdditionFixedPointFixture = ArithmeticAdditionValidationFixedPointFixture<Tensor, Accessor, NEArithmeticAddition, T>;
-
 TEST_SUITE(Float)
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 TEST_SUITE(F16)
diff --git a/tests/validation/NEON/ArithmeticSubtraction.cpp b/tests/validation/NEON/ArithmeticSubtraction.cpp
index fc25465..210ed45 100644
--- a/tests/validation/NEON/ArithmeticSubtraction.cpp
+++ b/tests/validation/NEON/ArithmeticSubtraction.cpp
@@ -233,9 +233,6 @@
 TEST_SUITE_END()
 TEST_SUITE_END()
 
-template <typename T1, typename T2 = T1, typename T3 = T1>
-using NEArithmeticSubtractionFixedPointFixture = ArithmeticSubtractionValidationFixedPointFixture<Tensor, Accessor, NEArithmeticSubtraction, T1, T2, T3>;
-
 TEST_SUITE(Float)
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 TEST_SUITE(FP16)
diff --git a/tests/validation/NEON/BatchNormalizationLayer.cpp b/tests/validation/NEON/BatchNormalizationLayer.cpp
index 3a18a0a..ca13d26 100644
--- a/tests/validation/NEON/BatchNormalizationLayer.cpp
+++ b/tests/validation/NEON/BatchNormalizationLayer.cpp
@@ -68,9 +68,6 @@
                                                                    framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                shape0, shape1, epsilon, use_beta, use_gamma, dt, data_layout)
 {
-    // Set fixed point position data type allowed
-    const int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
-
     TensorShape src_dst_shapes = shape0;
     if(data_layout == DataLayout::NHWC)
     {
@@ -78,12 +75,12 @@
     }
 
     // Create tensors
-    Tensor src   = create_tensor<Tensor>(src_dst_shapes, dt, 1, fixed_point_position, QuantizationInfo(), data_layout);
-    Tensor dst   = create_tensor<Tensor>(src_dst_shapes, dt, 1, fixed_point_position, QuantizationInfo(), data_layout);
-    Tensor mean  = create_tensor<Tensor>(shape1, dt, 1, fixed_point_position);
-    Tensor var   = create_tensor<Tensor>(shape1, dt, 1, fixed_point_position);
-    Tensor beta  = create_tensor<Tensor>(shape1, dt, 1, fixed_point_position);
-    Tensor gamma = create_tensor<Tensor>(shape1, dt, 1, fixed_point_position);
+    Tensor src   = create_tensor<Tensor>(src_dst_shapes, dt, 1, QuantizationInfo(), data_layout);
+    Tensor dst   = create_tensor<Tensor>(src_dst_shapes, dt, 1, QuantizationInfo(), data_layout);
+    Tensor mean  = create_tensor<Tensor>(shape1, dt, 1);
+    Tensor var   = create_tensor<Tensor>(shape1, dt, 1);
+    Tensor beta  = create_tensor<Tensor>(shape1, dt, 1);
+    Tensor gamma = create_tensor<Tensor>(shape1, dt, 1);
 
     // Create and Configure function
     NEBatchNormalizationLayer norm;
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index 94b38c2..591d142 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -154,16 +154,13 @@
 { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })),
 input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
 {
-    // Set fixed point position data type allowed
-    int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
     auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
 
     // Create tensors
-    Tensor src     = create_tensor<Tensor>(input_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
-    Tensor weights = create_tensor<Tensor>(weights_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
-    Tensor bias    = create_tensor<Tensor>(bias_shape, bias_data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
-    Tensor dst     = create_tensor<Tensor>(output_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
+    Tensor src     = create_tensor<Tensor>(input_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+    Tensor weights = create_tensor<Tensor>(weights_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+    Tensor bias    = create_tensor<Tensor>(bias_shape, bias_data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+    Tensor dst     = create_tensor<Tensor>(output_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
 
     ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -246,9 +243,6 @@
 TEST_SUITE_END()
 
 template <typename T>
-using NEGEMMConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
-
-template <typename T>
 using NEGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
 
 const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
diff --git a/tests/validation/NEON/DepthConvertLayer.cpp b/tests/validation/NEON/DepthConvertLayer.cpp
index 2bd3db7..78070d0 100644
--- a/tests/validation/NEON/DepthConvertLayer.cpp
+++ b/tests/validation/NEON/DepthConvertLayer.cpp
@@ -66,19 +66,15 @@
 using NEDepthConvertLayerToU8Fixture = DepthConvertLayerValidationFixture<Tensor, Accessor, NEDepthConvertLayer, T, uint8_t>;
 template <typename T>
 using NEDepthConvertLayerToU32Fixture = DepthConvertLayerValidationFixture<Tensor, Accessor, NEDepthConvertLayer, T, uint32_t>;
-template <typename T>
-using NEDepthConvertLayerToFP32FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<Tensor, Accessor, NEDepthConvertLayer, T, float>;
 
 TEST_SUITE(U8_to_U16)
 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
                                                                    DepthConvertLayerShiftDataset),
                shape, policy, shift)
 {
-    int fixed_point_position = 0;
-
     // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::U8, 1, fixed_point_position);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::U16, 1, fixed_point_position);
+    Tensor src = create_tensor<Tensor>(shape, DataType::U8, 1);
+    Tensor dst = create_tensor<Tensor>(shape, DataType::U16, 1);
 
     // Create and Configure function
     NEDepthConvertLayer depth_convert;
@@ -116,11 +112,9 @@
                                                                    DepthConvertLayerShiftDataset),
                shape, policy, shift)
 {
-    int fixed_point_position = 0;
-
     // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::U8, 1, fixed_point_position);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::S16, 1, fixed_point_position);
+    Tensor src = create_tensor<Tensor>(shape, DataType::U8, 1);
+    Tensor dst = create_tensor<Tensor>(shape, DataType::S16, 1);
 
     // Create and Configure function
     NEDepthConvertLayer depth_convert;
@@ -157,11 +151,9 @@
                                                                    DepthConvertLayerShiftDataset),
                shape, policy, shift)
 {
-    int fixed_point_position = 0;
-
     // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::U8, 1, fixed_point_position);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::S32, 1, fixed_point_position);
+    Tensor src = create_tensor<Tensor>(shape, DataType::U8, 1);
+    Tensor dst = create_tensor<Tensor>(shape, DataType::S32, 1);
 
     // Create and Configure function
     NEDepthConvertLayer depth_convert;
@@ -199,11 +191,9 @@
                                                                    DepthConvertLayerShiftDataset),
                shape, policy, shift)
 {
-    int fixed_point_position = 0;
-
     // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::U16, 1, fixed_point_position);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::U8, 1, fixed_point_position);
+    Tensor src = create_tensor<Tensor>(shape, DataType::U16, 1);
+    Tensor dst = create_tensor<Tensor>(shape, DataType::U8, 1);
 
     // Create and Configure function
     NEDepthConvertLayer depth_convert;
@@ -240,11 +230,9 @@
                                                                    DepthConvertLayerShiftDataset),
                shape, policy, shift)
 {
-    int fixed_point_position = 0;
-
     // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::U16, 1, fixed_point_position);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::U32, 1, fixed_point_position);
+    Tensor src = create_tensor<Tensor>(shape, DataType::U16, 1);
+    Tensor dst = create_tensor<Tensor>(shape, DataType::U32, 1);
 
     // Create and Configure function
     NEDepthConvertLayer depth_convert;
@@ -281,11 +269,9 @@
                                                                    DepthConvertLayerShiftDataset),
                shape, policy, shift)
 {
-    int fixed_point_position = 0;
-
     // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::S16, 1, fixed_point_position);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::U8, 1, fixed_point_position);
+    Tensor src = create_tensor<Tensor>(shape, DataType::S16, 1);
+    Tensor dst = create_tensor<Tensor>(shape, DataType::U8, 1);
 
     // Create and Configure function
     NEDepthConvertLayer depth_convert;
@@ -322,11 +308,9 @@
                                                                    DepthConvertLayerShiftDataset),
                shape, policy, shift)
 {
-    int fixed_point_position = 0;
-
     // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::S16, 1, fixed_point_position);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::S32, 1, fixed_point_position);
+    Tensor src = create_tensor<Tensor>(shape, DataType::S16, 1);
+    Tensor dst = create_tensor<Tensor>(shape, DataType::S32, 1);
 
     // Create and Configure function
     NEDepthConvertLayer depth_convert;
diff --git a/tests/validation/NEON/DilatedConvolutionLayer.cpp b/tests/validation/NEON/DilatedConvolutionLayer.cpp
index e703c67..7cfffc0 100644
--- a/tests/validation/NEON/DilatedConvolutionLayer.cpp
+++ b/tests/validation/NEON/DilatedConvolutionLayer.cpp
@@ -106,16 +106,13 @@
                                                                    CNNDataTypes),
                input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type)
 {
-    // Set fixed point position data type allowed
-    int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
     auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
 
     // Create tensors
-    Tensor src     = create_tensor<Tensor>(input_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
-    Tensor weights = create_tensor<Tensor>(weights_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
-    Tensor bias    = create_tensor<Tensor>(bias_shape, bias_data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
-    Tensor dst     = create_tensor<Tensor>(output_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
+    Tensor src     = create_tensor<Tensor>(input_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+    Tensor weights = create_tensor<Tensor>(weights_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+    Tensor bias    = create_tensor<Tensor>(bias_shape, bias_data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+    Tensor dst     = create_tensor<Tensor>(output_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
 
     ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -198,9 +195,6 @@
 TEST_SUITE_END()
 
 template <typename T>
-using NEGEMMDilatedConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
-
-template <typename T>
 using NEGEMMDilatedConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
 
 TEST_SUITE(Quantized)
diff --git a/tests/validation/NEON/DirectConvolutionLayer.cpp b/tests/validation/NEON/DirectConvolutionLayer.cpp
index 4995d88..bf5b33c 100644
--- a/tests/validation/NEON/DirectConvolutionLayer.cpp
+++ b/tests/validation/NEON/DirectConvolutionLayer.cpp
@@ -173,9 +173,6 @@
 TEST_SUITE_END()
 TEST_SUITE_END()
 
-template <typename T>
-using NEDirectConvolutionLayerFixedPointFixture = DirectConvolutionValidationFixedPointFixture<Tensor, Accessor, NEDirectConvolutionLayer, T>;
-
 const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
 {
     ActivationLayerInfo(),
diff --git a/tests/validation/NEON/FullyConnectedLayer.cpp b/tests/validation/NEON/FullyConnectedLayer.cpp
index 3adcf61..174778b 100644
--- a/tests/validation/NEON/FullyConnectedLayer.cpp
+++ b/tests/validation/NEON/FullyConnectedLayer.cpp
@@ -68,9 +68,6 @@
                                                                    CNNDataTypes),
                src_shape, weights_shape, bias_shape, dst_shape, transpose_weights, reshape_weights, data_type)
 {
-    // Set fixed point position data type allowed
-    int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
     TensorShape ws(weights_shape);
 
     // Transpose weights if not done in the function
@@ -92,10 +89,10 @@
     }
 
     // Create tensors
-    Tensor src     = create_tensor<Tensor>(src_shape, data_type, 1, fixed_point_position);
-    Tensor weights = create_tensor<Tensor>(ws, data_type, 1, fixed_point_position);
-    Tensor bias    = create_tensor<Tensor>(bias_shape, data_type, 1, fixed_point_position);
-    Tensor dst     = create_tensor<Tensor>(dst_shape, data_type, 1, fixed_point_position);
+    Tensor src     = create_tensor<Tensor>(src_shape, data_type, 1);
+    Tensor weights = create_tensor<Tensor>(ws, data_type, 1);
+    Tensor bias    = create_tensor<Tensor>(bias_shape, data_type, 1);
+    Tensor dst     = create_tensor<Tensor>(dst_shape, data_type, 1);
 
     ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -192,9 +189,6 @@
 TEST_SUITE_END()
 TEST_SUITE_END()
 
-template <typename T>
-using NEFullyConnectedLayerFixedPointFixture = FullyConnectedLayerValidationFixedPointFixture<Tensor, Accessor, NEFullyConnectedLayer, T, true>;
-
 TEST_SUITE_END()
 TEST_SUITE_END()
 } // namespace validation
diff --git a/tests/validation/NEON/GEMM.cpp b/tests/validation/NEON/GEMM.cpp
index e0f63a8..9c64131 100644
--- a/tests/validation/NEON/GEMM.cpp
+++ b/tests/validation/NEON/GEMM.cpp
@@ -98,14 +98,11 @@
 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallGEMMDataset(), datasets::LargeGEMMDataset()), CNNDataTypes),
                shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type)
 {
-    // Set fixed point position data type allowed
-    const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
     // Create tensors
-    Tensor a   = create_tensor<Tensor>(shape_a, data_type, 1, fixed_point_position);
-    Tensor b   = create_tensor<Tensor>(shape_b, data_type, 1, fixed_point_position);
-    Tensor c   = create_tensor<Tensor>(shape_c, data_type, 1, fixed_point_position);
-    Tensor dst = create_tensor<Tensor>(output_shape, data_type, 1, fixed_point_position);
+    Tensor a   = create_tensor<Tensor>(shape_a, data_type, 1);
+    Tensor b   = create_tensor<Tensor>(shape_b, data_type, 1);
+    Tensor c   = create_tensor<Tensor>(shape_c, data_type, 1);
+    Tensor dst = create_tensor<Tensor>(output_shape, data_type, 1);
 
     ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -153,9 +150,6 @@
 TEST_SUITE_END()
 TEST_SUITE_END()
 
-template <typename T>
-using NEGEMMFixedPointFixture = GEMMValidationFixedPointFixture<Tensor, Accessor, NEGEMM, T>;
-
 TEST_SUITE_END()
 TEST_SUITE_END()
 } // namespace validation
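
Note on the pattern: in the configuration tests the change is a uniform argument drop, create_tensor
loses the fixed point position parameter and keeps shape, data type and number of channels. A minimal
before/after sketch using the names from the GEMM test above (no new helpers are assumed):

    // Before: the fixed point position had to be threaded into every tensor creation.
    //   Tensor a = create_tensor<Tensor>(shape_a, data_type, 1, fixed_point_position);

    // After: shape, data type and number of channels only.
    Tensor a = create_tensor<Tensor>(shape_a, data_type, 1);
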
diff --git a/tests/validation/NEON/NormalizationLayer.cpp b/tests/validation/NEON/NormalizationLayer.cpp
index 8c66611..02cca0b 100644
--- a/tests/validation/NEON/NormalizationLayer.cpp
+++ b/tests/validation/NEON/NormalizationLayer.cpp
@@ -50,9 +50,6 @@
 constexpr AbsoluteTolerance<float> tolerance_f32(0.00001f);
 
 /** Input data set. */
-const auto NormalizationDatasetQS = combine(combine(combine(combine(datasets::TinyShapes(), datasets::NormalizationTypes()), framework::dataset::make("NormalizationSize", 3, 9, 2)),
-                                                    framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })),
-                                            framework::dataset::make("IsScaled", { true }));
 const auto NormalizationDataset = combine(combine(combine(combine(datasets::SmallShapes(), datasets::NormalizationTypes()), framework::dataset::make("NormalizationSize", 3, 9, 2)),
                                                   framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })),
                                           framework::dataset::make("IsScaled", { true }));
@@ -132,9 +129,6 @@
 TEST_SUITE_END()
 TEST_SUITE_END()
 
-template <typename T>
-using NENormalizationLayerFixedPointFixture = NormalizationValidationFixedPointFixture<Tensor, Accessor, NENormalizationLayer, T>;
-
 TEST_SUITE_END()
 TEST_SUITE_END()
 } // namespace validation
diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp
index 8762f1f..bbfca46 100644
--- a/tests/validation/NEON/PoolingLayer.cpp
+++ b/tests/validation/NEON/PoolingLayer.cpp
@@ -50,11 +50,6 @@
                                                    framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })),
                                            framework::dataset::make("ExcludePadding", { true, false }));
 
-/** Input data set for quantized data types */
-const auto PoolingLayerDatasetQS = combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(3, 3) })),
-                                                   framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })),
-                                           framework::dataset::make("ExcludePadding", { false }));
-
 /** Input data set for asymmetric data type */
 
 const auto PoolingLayerDatasetQASYMM8 = combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(3, 3), Size2D(4, 4), Size2D(9, 9), Size2D(3, 7), Size2D(7, 8) })),
@@ -159,9 +154,6 @@
 #endif           /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
 TEST_SUITE_END() // Float
 
-template <typename T>
-using NEPoolingLayerFixedPointFixture = PoolingLayerValidationFixedPointFixture<Tensor, Accessor, NEPoolingLayer, T>;
-
 TEST_SUITE(Quantized)
 
 template <typename T>
diff --git a/tests/validation/NEON/SYSTEM/AlexNet.cpp b/tests/validation/NEON/SYSTEM/AlexNet.cpp
index 3fa19e4..adcfe72 100644
--- a/tests/validation/NEON/SYSTEM/AlexNet.cpp
+++ b/tests/validation/NEON/SYSTEM/AlexNet.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -80,7 +80,7 @@
                                           };
     NEAlexNetModel network{};
 
-    network.init(dt, 4, batches);
+    network.init(dt, batches);
     network.build();
     network.allocate();
     network.fill(weight_files, bias_files);
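
The system test applies the same convention one level up: the model init drops the fixed point
position that used to sit between the data type and the batch count. A condensed sketch of the
resulting call sequence, with all names taken from the test above:

    NEAlexNetModel network{};
    network.init(dt, batches); // previously network.init(dt, 4, batches)
    network.build();
    network.allocate();
    network.fill(weight_files, bias_files);
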
diff --git a/tests/validation/NEON/Scale.cpp b/tests/validation/NEON/Scale.cpp
index 8940259..5f5cfdd 100644
--- a/tests/validation/NEON/Scale.cpp
+++ b/tests/validation/NEON/Scale.cpp
@@ -159,8 +159,8 @@
     shape_scaled.set(idx_height, src_shape[idx_height] * scale_y);
 
     // Create tensors
-    Tensor src = create_tensor<Tensor>(src_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
-    Tensor dst = create_tensor<Tensor>(shape_scaled, data_type, 1, 0, QuantizationInfo(), data_layout);
+    Tensor src = create_tensor<Tensor>(src_shape, data_type, 1, QuantizationInfo(), data_layout);
+    Tensor dst = create_tensor<Tensor>(shape_scaled, data_type, 1, QuantizationInfo(), data_layout);
 
     ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
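
Where a test also supplies a QuantizationInfo or a DataLayout, only the integer slot in front of the
QuantizationInfo disappears; the order of the remaining arguments is unchanged. A sketch of the
overload as used in the Scale test above:

    // shape, data type, number of channels, quantization info, data layout
    Tensor src = create_tensor<Tensor>(src_shape, data_type, 1, QuantizationInfo(), data_layout);
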
diff --git a/tests/validation/NEON/SoftmaxLayer.cpp b/tests/validation/NEON/SoftmaxLayer.cpp
index b6efc8f..8c0d46b 100644
--- a/tests/validation/NEON/SoftmaxLayer.cpp
+++ b/tests/validation/NEON/SoftmaxLayer.cpp
@@ -66,12 +66,9 @@
 
 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(concat(datasets::SoftmaxLayerSmallShapes(), datasets::SoftmaxLayerLargeShapes()), CNNDataTypes), shape, data_type)
 {
-    // Set fixed point position data type allowed
-    const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
     // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, data_type, 1, fixed_point_position);
-    Tensor dst = create_tensor<Tensor>(shape, data_type, 1, fixed_point_position);
+    Tensor src = create_tensor<Tensor>(shape, data_type, 1);
+    Tensor dst = create_tensor<Tensor>(shape, data_type, 1);
 
     ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -156,9 +153,6 @@
 TEST_SUITE_END()
 
 template <typename T>
-using NESoftmaxLayerFixedPointFixture = SoftmaxValidationFixedPointFixture<Tensor, Accessor, NESoftmaxLayer, T>;
-
-template <typename T>
 using NESoftmaxLayerQuantizedFixture = SoftmaxValidationQuantizedFixture<Tensor, Accessor, NESoftmaxLayer, T>;
 
 TEST_SUITE(Quantized)
diff --git a/tests/validation/fixtures/ActivationLayerFixture.h b/tests/validation/fixtures/ActivationLayerFixture.h
index e212c7b..d29d67c 100644
--- a/tests/validation/fixtures/ActivationLayerFixture.h
+++ b/tests/validation/fixtures/ActivationLayerFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -47,17 +47,16 @@
 {
 public:
     template <typename...>
-    void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, int fractional_bits, QuantizationInfo quantization_info)
+    void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, QuantizationInfo quantization_info)
     {
-        _fractional_bits   = fractional_bits;
         _quantization_info = quantization_info;
         _data_type         = data_type;
         _function          = function;
 
         ActivationLayerInfo info(function, alpha_beta, alpha_beta);
 
-        _target    = compute_target(shape, in_place, info, data_type, fractional_bits, quantization_info);
-        _reference = compute_reference(shape, info, data_type, fractional_bits, quantization_info);
+        _target    = compute_target(shape, in_place, info, data_type, quantization_info);
+        _reference = compute_reference(shape, info, data_type, quantization_info);
     }
 
 protected:
@@ -80,17 +79,17 @@
         {
             int min_bound = 0;
             int max_bound = 0;
-            std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<T>(_function, _data_type, _fractional_bits);
+            std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<T>(_function, _data_type);
             std::uniform_int_distribution<> distribution(min_bound, max_bound);
             library->fill(tensor, distribution, 0);
         }
     }
 
-    TensorType compute_target(const TensorShape &shape, bool in_place, ActivationLayerInfo info, DataType data_type, int fixed_point_position, QuantizationInfo quantization_info)
+    TensorType compute_target(const TensorShape &shape, bool in_place, ActivationLayerInfo info, DataType data_type, QuantizationInfo quantization_info)
     {
         // Create tensors
-        TensorType src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, quantization_info);
-        TensorType dst = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, quantization_info);
+        TensorType src = create_tensor<TensorType>(shape, data_type, 1, quantization_info);
+        TensorType dst = create_tensor<TensorType>(shape, data_type, 1, quantization_info);
 
         // Create and configure function
         FunctionType act_layer;
@@ -128,10 +127,10 @@
         }
     }
 
-    SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo info, DataType data_type, int fixed_point_position, QuantizationInfo quantization_info)
+    SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo info, DataType data_type, QuantizationInfo quantization_info)
     {
         // Create reference
-        SimpleTensor<T> src{ shape, data_type, 1, fixed_point_position, quantization_info };
+        SimpleTensor<T> src{ shape, data_type, 1, quantization_info };
 
         // Fill reference
         fill(src);
@@ -141,7 +140,6 @@
 
     TensorType                              _target{};
     SimpleTensor<T>                         _reference{};
-    int                                     _fractional_bits{};
     QuantizationInfo                        _quantization_info{};
     DataType                                _data_type{};
     ActivationLayerInfo::ActivationFunction _function{};
@@ -154,18 +152,7 @@
     template <typename...>
     void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type)
     {
-        ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, 0, QuantizationInfo());
-    }
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ActivationValidationFixedPointFixture : public ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
-{
-public:
-    template <typename...>
-    void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, int fractional_bits)
-    {
-        ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, fractional_bits, QuantizationInfo());
+        ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, QuantizationInfo());
     }
 };
 
@@ -176,7 +163,7 @@
     template <typename...>
     void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, QuantizationInfo quantization_info)
     {
-        ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, 0, quantization_info);
+        ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, quantization_info);
     }
 };
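
What survives here is a two-level fixture hierarchy: a generic fixture whose setup takes a
QuantizationInfo, plus thin wrappers that forward either a default-constructed QuantizationInfo (the
float path) or the caller's one (the quantized path). The forwarding change itself, condensed from
the hunks above:

    // Before: every wrapper passed a dummy fractional_bits of 0.
    //   ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, 0, QuantizationInfo());

    // After: the generic setup is one argument shorter.
    ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, QuantizationInfo());
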
 
diff --git a/tests/validation/fixtures/ArithmeticAdditionFixture.h b/tests/validation/fixtures/ArithmeticAdditionFixture.h
index 99a5677..6d529a8 100644
--- a/tests/validation/fixtures/ArithmeticAdditionFixture.h
+++ b/tests/validation/fixtures/ArithmeticAdditionFixture.h
@@ -45,11 +45,11 @@
 {
 public:
     template <typename...>
-    void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, int fractional_bits,
+    void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy,
                QuantizationInfo quantization_info)
     {
-        _target    = compute_target(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, fractional_bits, quantization_info);
-        _reference = compute_reference(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, fractional_bits, quantization_info);
+        _target    = compute_target(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, quantization_info);
+        _reference = compute_reference(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, quantization_info);
     }
 
 protected:
@@ -60,12 +60,12 @@
     }
 
     TensorType compute_target(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy,
-                              int fixed_point_position, QuantizationInfo quantization_info)
+                              QuantizationInfo quantization_info)
     {
         // Create tensors
-        TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type0, 1, fixed_point_position, quantization_info);
-        TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type1, 1, fixed_point_position, quantization_info);
-        TensorType dst      = create_tensor<TensorType>(TensorShape::broadcast_shape(shape0, shape1), output_data_type, 1, fixed_point_position, quantization_info);
+        TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type0, 1, quantization_info);
+        TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type1, 1, quantization_info);
+        TensorType dst      = create_tensor<TensorType>(TensorShape::broadcast_shape(shape0, shape1), output_data_type, 1, quantization_info);
 
         // Create and configure function
         FunctionType add;
@@ -95,11 +95,11 @@
     }
 
     SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy,
-                                      int fixed_point_position, QuantizationInfo quantization_info)
+                                      QuantizationInfo quantization_info)
     {
         // Create reference
-        SimpleTensor<T> ref_src1{ shape0, data_type0, 1, fixed_point_position, quantization_info };
-        SimpleTensor<T> ref_src2{ shape1, data_type1, 1, fixed_point_position, quantization_info };
+        SimpleTensor<T> ref_src1{ shape0, data_type0, 1, quantization_info };
+        SimpleTensor<T> ref_src2{ shape1, data_type1, 1, quantization_info };
 
         // Fill reference
         fill(ref_src1, 0);
@@ -117,9 +117,9 @@
 {
 public:
     template <typename...>
-    void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, int fractional_bits)
+    void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
     {
-        ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, 0, QuantizationInfo());
+        ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, QuantizationInfo());
     }
 };
 
@@ -130,7 +130,7 @@
     template <typename...>
     void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
     {
-        ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, 0, QuantizationInfo());
+        ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, QuantizationInfo());
     }
 };
 
@@ -139,9 +139,9 @@
 {
 public:
     template <typename...>
-    void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, int fractional_bits)
+    void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
     {
-        ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, shape, data_type0, data_type1, output_data_type, convert_policy, fractional_bits, QuantizationInfo());
+        ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, shape, data_type0, data_type1, output_data_type, convert_policy, QuantizationInfo());
     }
 };
 
@@ -152,7 +152,7 @@
     template <typename...>
     void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
     {
-        ArithmeticAdditionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type0, data_type1, output_data_type, convert_policy, 0);
+        ArithmeticAdditionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type0, data_type1, output_data_type, convert_policy);
     }
 };
 
@@ -163,7 +163,7 @@
     template <typename...>
     void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, QuantizationInfo quantization_info)
     {
-        ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, shape, data_type0, data_type1, output_data_type, convert_policy, 0, quantization_info);
+        ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, shape, data_type0, data_type1, output_data_type, convert_policy, quantization_info);
     }
 };
 } // namespace validation
diff --git a/tests/validation/fixtures/ArithmeticSubtractionFixture.h b/tests/validation/fixtures/ArithmeticSubtractionFixture.h
index ba0dd14..04bb53a 100644
--- a/tests/validation/fixtures/ArithmeticSubtractionFixture.h
+++ b/tests/validation/fixtures/ArithmeticSubtractionFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -45,11 +45,10 @@
 {
 public:
     template <typename...>
-    void setup(TensorShape shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, int fractional_bits)
+    void setup(TensorShape shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
     {
-        _fractional_bits = fractional_bits;
-        _target          = compute_target(shape, data_type0, data_type1, output_data_type, convert_policy, fractional_bits);
-        _reference       = compute_reference(shape, data_type0, data_type1, output_data_type, convert_policy, fractional_bits);
+        _target    = compute_target(shape, data_type0, data_type1, output_data_type, convert_policy);
+        _reference = compute_reference(shape, data_type0, data_type1, output_data_type, convert_policy);
     }
 
 protected:
@@ -59,12 +58,12 @@
         library->fill_tensor_uniform(tensor, i);
     }
 
-    TensorType compute_target(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, int fixed_point_position)
+    TensorType compute_target(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
     {
         // Create tensors
-        TensorType ref_src1 = create_tensor<TensorType>(shape, data_type0, 1, fixed_point_position);
-        TensorType ref_src2 = create_tensor<TensorType>(shape, data_type1, 1, fixed_point_position);
-        TensorType dst      = create_tensor<TensorType>(shape, output_data_type, 1, fixed_point_position);
+        TensorType ref_src1 = create_tensor<TensorType>(shape, data_type0, 1);
+        TensorType ref_src2 = create_tensor<TensorType>(shape, data_type1, 1);
+        TensorType dst      = create_tensor<TensorType>(shape, output_data_type, 1);
 
         // Create and configure function
         FunctionType sub;
@@ -93,11 +92,11 @@
         return dst;
     }
 
-    SimpleTensor<T3> compute_reference(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, int fixed_point_position)
+    SimpleTensor<T3> compute_reference(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
     {
         // Create reference
-        SimpleTensor<T1> ref_src1{ shape, data_type0, 1, fixed_point_position };
-        SimpleTensor<T2> ref_src2{ shape, data_type1, 1, fixed_point_position };
+        SimpleTensor<T1> ref_src1{ shape, data_type0, 1 };
+        SimpleTensor<T2> ref_src2{ shape, data_type1, 1 };
 
         // Fill reference
         fill(ref_src1, 0);
@@ -108,7 +107,6 @@
 
     TensorType       _target{};
     SimpleTensor<T3> _reference{};
-    int              _fractional_bits{};
 };
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2 = T1, typename T3 = T1>
 class ArithmeticSubtractionValidationFixture : public ArithmeticSubtractionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>
@@ -117,7 +115,7 @@
     template <typename...>
     void setup(TensorShape shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
     {
-        ArithmeticSubtractionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>::setup(shape, data_type0, data_type1, output_data_type, convert_policy, 0);
+        ArithmeticSubtractionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>::setup(shape, data_type0, data_type1, output_data_type, convert_policy);
     }
 };
 } // namespace validation
diff --git a/tests/validation/fixtures/BatchNormalizationLayerFixture.h b/tests/validation/fixtures/BatchNormalizationLayerFixture.h
index b7e32a6..bc3b488 100644
--- a/tests/validation/fixtures/BatchNormalizationLayerFixture.h
+++ b/tests/validation/fixtures/BatchNormalizationLayerFixture.h
@@ -45,15 +45,14 @@
 {
 public:
     template <typename...>
-    void setup(TensorShape shape0, TensorShape shape1, float epsilon, bool use_beta, bool use_gamma, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout, int fractional_bits)
+    void setup(TensorShape shape0, TensorShape shape1, float epsilon, bool use_beta, bool use_gamma, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout)
     {
-        _fractional_bits = fractional_bits;
-        _data_type       = dt;
-        _use_beta        = use_beta;
-        _use_gamma       = use_gamma;
+        _data_type = dt;
+        _use_beta  = use_beta;
+        _use_gamma = use_gamma;
 
-        _target    = compute_target(shape0, shape1, epsilon, act_info, dt, data_layout, fractional_bits);
-        _reference = compute_reference(shape0, shape1, epsilon, act_info, dt, fractional_bits);
+        _target    = compute_target(shape0, shape1, epsilon, act_info, dt, data_layout);
+        _reference = compute_reference(shape0, shape1, epsilon, act_info, dt);
     }
 
 protected:
@@ -93,7 +92,7 @@
         {
             int min_bound = 0;
             int max_bound = 0;
-            std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<T>(_fractional_bits);
+            std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<T>();
             std::uniform_int_distribution<> distribution(min_bound, max_bound);
             std::uniform_int_distribution<> distribution_var(0, max_bound);
             library->fill(src_tensor, distribution, 0);
@@ -115,12 +114,12 @@
             else
             {
                 // Fill with default value 1
-                library->fill_tensor_value(gamma_tensor, static_cast<T>(1 << (_fractional_bits)));
+                library->fill_tensor_value(gamma_tensor, static_cast<T>(1));
             }
         }
     }
 
-    TensorType compute_target(TensorShape shape0, const TensorShape &shape1, float epsilon, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout, int fixed_point_position)
+    TensorType compute_target(TensorShape shape0, const TensorShape &shape1, float epsilon, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout)
     {
         if(data_layout == DataLayout::NHWC)
         {
@@ -128,12 +127,12 @@
         }
 
         // Create tensors
-        TensorType src   = create_tensor<TensorType>(shape0, dt, 1, fixed_point_position, QuantizationInfo(), data_layout);
-        TensorType dst   = create_tensor<TensorType>(shape0, dt, 1, fixed_point_position, QuantizationInfo(), data_layout);
-        TensorType mean  = create_tensor<TensorType>(shape1, dt, 1, fixed_point_position);
-        TensorType var   = create_tensor<TensorType>(shape1, dt, 1, fixed_point_position);
-        TensorType beta  = create_tensor<TensorType>(shape1, dt, 1, fixed_point_position);
-        TensorType gamma = create_tensor<TensorType>(shape1, dt, 1, fixed_point_position);
+        TensorType src   = create_tensor<TensorType>(shape0, dt, 1, QuantizationInfo(), data_layout);
+        TensorType dst   = create_tensor<TensorType>(shape0, dt, 1, QuantizationInfo(), data_layout);
+        TensorType mean  = create_tensor<TensorType>(shape1, dt, 1);
+        TensorType var   = create_tensor<TensorType>(shape1, dt, 1);
+        TensorType beta  = create_tensor<TensorType>(shape1, dt, 1);
+        TensorType gamma = create_tensor<TensorType>(shape1, dt, 1);
 
         // Create and configure function
         FunctionType norm;
@@ -172,24 +171,23 @@
         return dst;
     }
 
-    SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, float epsilon, ActivationLayerInfo act_info, DataType dt, int fixed_point_position)
+    SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, float epsilon, ActivationLayerInfo act_info, DataType dt)
     {
         // Create reference
-        SimpleTensor<T> ref_src{ shape0, dt, 1, fixed_point_position };
-        SimpleTensor<T> ref_mean{ shape1, dt, 1, fixed_point_position };
-        SimpleTensor<T> ref_var{ shape1, dt, 1, fixed_point_position };
-        SimpleTensor<T> ref_beta{ shape1, dt, 1, fixed_point_position };
-        SimpleTensor<T> ref_gamma{ shape1, dt, 1, fixed_point_position };
+        SimpleTensor<T> ref_src{ shape0, dt, 1 };
+        SimpleTensor<T> ref_mean{ shape1, dt, 1 };
+        SimpleTensor<T> ref_var{ shape1, dt, 1 };
+        SimpleTensor<T> ref_beta{ shape1, dt, 1 };
+        SimpleTensor<T> ref_gamma{ shape1, dt, 1 };
 
         // Fill reference
         fill(ref_src, ref_mean, ref_var, ref_beta, ref_gamma);
 
-        return reference::batch_normalization_layer(ref_src, ref_mean, ref_var, ref_beta, ref_gamma, epsilon, act_info, fixed_point_position);
+        return reference::batch_normalization_layer(ref_src, ref_mean, ref_var, ref_beta, ref_gamma, epsilon, act_info);
     }
 
     TensorType      _target{};
     SimpleTensor<T> _reference{};
-    int             _fractional_bits{};
     DataType        _data_type{};
     bool            _use_beta{};
     bool            _use_gamma{};
@@ -202,7 +200,7 @@
     template <typename...>
     void setup(TensorShape shape0, TensorShape shape1, float epsilon, bool use_beta, bool use_gamma, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout)
     {
-        BatchNormalizationLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, epsilon, use_beta, use_gamma, act_info, dt, data_layout, 0);
+        BatchNormalizationLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, epsilon, use_beta, use_gamma, act_info, dt, data_layout);
     }
 };
 } // namespace validation
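
The reference side mirrors the target side: SimpleTensor drops the fixed point position from its
constructor, the default gamma fill becomes a plain static_cast<T>(1) instead of the fixed point
encoding 1 << fractional_bits, and reference::batch_normalization_layer loses its trailing argument.
Condensed from the fixture above (the other shape1 tensors are built the same way as ref_gamma):

    SimpleTensor<T> ref_src{ shape0, dt, 1 };   // shape, data type, number of channels
    SimpleTensor<T> ref_gamma{ shape1, dt, 1 };
    SimpleTensor<T> dst = reference::batch_normalization_layer(ref_src, ref_mean, ref_var, ref_beta, ref_gamma, epsilon, act_info);
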
diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index 00ca077..7ba2583 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -57,12 +57,11 @@
 public:
     template <typename...>
     void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights,
-               DataType data_type, DataLayout data_layout, int fractional_bits, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
+               DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
     {
         _data_type         = data_type;
         _is_quantized      = is_data_type_quantized_asymmetric(data_type);
         _bias_data_type    = _is_quantized ? DataType::S32 : data_type;
-        _fractional_bits   = fractional_bits;
         _quantization_info = quantization_info;
         _data_layout       = data_layout;
 
@@ -117,10 +116,10 @@
         TensorShape reshaped_weights_shape(weights_shape);
 
         // Create tensors
-        TensorType src     = create_tensor<TensorType>(input_shape, _data_type, 1, _fractional_bits, _quantization_info, _data_layout);
-        TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _fractional_bits, _quantization_info, _data_layout);
-        TensorType bias    = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _fractional_bits, _quantization_info, _data_layout);
-        TensorType dst     = create_tensor<TensorType>(output_shape, _data_type, 1, _fractional_bits, _quantization_info, _data_layout);
+        TensorType src     = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info, _data_layout);
+        TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _quantization_info, _data_layout);
+        TensorType bias    = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info, _data_layout);
+        TensorType dst     = create_tensor<TensorType>(output_shape, _data_type, 1, _quantization_info, _data_layout);
 
         // Create and configure function
         FunctionType conv;
@@ -157,9 +156,9 @@
                                       const Size2D &dilation, const ActivationLayerInfo act_info)
     {
         // Create reference
-        SimpleTensor<T>     src{ input_shape, _data_type, 1, _fractional_bits, _quantization_info };
-        SimpleTensor<T>     weights{ weights_shape, _data_type, 1, _fractional_bits, _quantization_info };
-        SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _fractional_bits, _quantization_info };
+        SimpleTensor<T>     src{ input_shape, _data_type, 1, _quantization_info };
+        SimpleTensor<T>     weights{ weights_shape, _data_type, 1, _quantization_info };
+        SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _quantization_info };
 
         // Fill reference
         fill(src, 0);
@@ -176,7 +175,6 @@
     DataType         _data_type{};
     DataType         _bias_data_type{};
     DataLayout       _data_layout{};
-    int              _fractional_bits{};
     QuantizationInfo _quantization_info{};
     bool             _is_quantized = false;
 };
@@ -189,7 +187,7 @@
     void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
                DataLayout data_layout, ActivationLayerInfo act_info)
     {
-        ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights, data_type, data_layout, 0,
+        ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights, data_type, data_layout,
                                                                                               QuantizationInfo(), act_info);
     }
 };
@@ -200,11 +198,11 @@
 public:
     template <typename...>
     void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
-               int fractional_bits, ActivationLayerInfo act_info)
+               ActivationLayerInfo act_info)
     {
         ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights, data_type,
                                                                                               DataLayout::NCHW,
-                                                                                              fractional_bits, QuantizationInfo(), act_info);
+                                                                                              QuantizationInfo(), act_info);
     }
 };
 
@@ -217,7 +215,7 @@
                DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
     {
         ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
-                                                                                              data_type, data_layout, 0, quantization_info, act_info);
+                                                                                              data_type, data_layout, quantization_info, act_info);
     }
 };
 } // namespace validation
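
One detail worth keeping in mind for the surviving quantized path: with QASYMM8 inputs the bias tensor
is created as S32, matching the 32-bit integer accumulation of the quantized kernels, while float runs
keep the input data type. A condensed sketch of the selection done in the generic fixture above
(locals stand in for the underscored members):

    const bool     is_quantized   = is_data_type_quantized_asymmetric(data_type);
    const DataType bias_data_type = is_quantized ? DataType::S32 : data_type;
    TensorType     bias           = create_tensor<TensorType>(bias_shape, bias_data_type, 1, quantization_info, data_layout);
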
diff --git a/tests/validation/fixtures/DeconvolutionLayerFixture.h b/tests/validation/fixtures/DeconvolutionLayerFixture.h
index 137068a..12ce9ce 100644
--- a/tests/validation/fixtures/DeconvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DeconvolutionLayerFixture.h
@@ -45,13 +45,12 @@
 public:
     template <typename...>
     void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info,
-               const std::pair<unsigned int, unsigned int> &inner_border, DataType data_type, int fractional_bits)
+               const std::pair<unsigned int, unsigned int> &inner_border, DataType data_type)
     {
-        _fractional_bits = fractional_bits;
-        _data_type       = data_type;
+        _data_type = data_type;
 
-        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, fractional_bits);
-        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, fractional_bits);
+        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type);
+        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type);
     }
 
 protected:
@@ -70,13 +69,13 @@
     }
 
     TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape,
-                              const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> &inner_border, DataType data_type, int fixed_point_position)
+                              const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> &inner_border, DataType data_type)
     {
         // Create tensors
-        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, fixed_point_position);
-        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, fixed_point_position);
-        TensorType bias    = create_tensor<TensorType>(bias_shape, data_type, 1, fixed_point_position);
-        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, fixed_point_position);
+        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1);
+        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1);
+        TensorType bias    = create_tensor<TensorType>(bias_shape, data_type, 1);
+        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1);
 
         // Create and configure function
         FunctionType conv;
@@ -110,12 +109,12 @@
     }
 
     SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape,
-                                      const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> inner_border, DataType data_type, int fixed_point_position)
+                                      const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> inner_border, DataType data_type)
     {
         // Create reference
-        SimpleTensor<T> src{ input_shape, data_type, 1, fixed_point_position };
-        SimpleTensor<T> weights{ weights_shape, data_type, 1, fixed_point_position };
-        SimpleTensor<T> bias{ bias_shape, data_type, 1, fixed_point_position };
+        SimpleTensor<T> src{ input_shape, data_type, 1 };
+        SimpleTensor<T> weights{ weights_shape, data_type, 1 };
+        SimpleTensor<T> bias{ bias_shape, data_type, 1 };
 
         // Fill reference
         fill(src, 0);
@@ -127,7 +126,6 @@
 
     TensorType      _target{};
     SimpleTensor<T> _reference{};
-    int             _fractional_bits{};
     DataType        _data_type{};
 };
 
@@ -146,7 +144,7 @@
         const std::pair<unsigned int, unsigned int> inner_border(inner_border_right, inner_border_top);
         auto        out_dim      = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, padx, pady, inner_border.first, inner_border.second, sx, sy);
         TensorShape output_shape = deconvolution_output_shape(out_dim, input_shape, weights_shape);
-        DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, 0);
+        DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type);
     }
 };
 
diff --git a/tests/validation/fixtures/DepthConcatenateLayerFixture.h b/tests/validation/fixtures/DepthConcatenateLayerFixture.h
index 6e112c7..76b56ad 100644
--- a/tests/validation/fixtures/DepthConcatenateLayerFixture.h
+++ b/tests/validation/fixtures/DepthConcatenateLayerFixture.h
@@ -102,12 +102,12 @@
 
         for(const auto &shape : shapes)
         {
-            srcs.emplace_back(create_tensor<TensorType>(shape, data_type, 1, _fractional_bits));
+            srcs.emplace_back(create_tensor<TensorType>(shape, data_type, 1));
             src_ptrs.emplace_back(&srcs.back());
         }
 
         TensorShape dst_shape = calculate_depth_concatenate_shape(shapes);
-        TensorType  dst       = create_tensor<TensorType>(dst_shape, data_type, 1, _fractional_bits);
+        TensorType  dst       = create_tensor<TensorType>(dst_shape, data_type, 1);
 
         // Create and configure function
         FunctionType depth_concat;
@@ -151,7 +151,7 @@
         int i = 0;
         for(const auto &shape : shapes)
         {
-            srcs.emplace_back(shape, data_type, 1, _fractional_bits);
+            srcs.emplace_back(shape, data_type, 1);
             fill(srcs.back(), i++);
         }
 
@@ -160,9 +160,6 @@
 
     TensorType      _target{};
     SimpleTensor<T> _reference{};
-
-private:
-    int _fractional_bits{ 1 };
 };
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/fixtures/DepthConvertLayerFixture.h b/tests/validation/fixtures/DepthConvertLayerFixture.h
index 4b4e959..eb1c083 100644
--- a/tests/validation/fixtures/DepthConvertLayerFixture.h
+++ b/tests/validation/fixtures/DepthConvertLayerFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -45,12 +45,11 @@
 {
 public:
     template <typename...>
-    void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift, uint32_t fractional_bits)
+    void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift)
     {
-        _shift           = shift;
-        _fractional_bits = fractional_bits;
-        _target          = compute_target(shape, dt_in, dt_out, policy, shift, fractional_bits);
-        _reference       = compute_reference(shape, dt_in, dt_out, policy, shift, fractional_bits);
+        _shift     = shift;
+        _target    = compute_target(shape, dt_in, dt_out, policy, shift);
+        _reference = compute_reference(shape, dt_in, dt_out, policy, shift);
     }
 
 protected:
@@ -60,11 +59,11 @@
         library->fill_tensor_uniform(tensor, i);
     }
 
-    TensorType compute_target(const TensorShape &shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift, uint32_t fixed_point_position)
+    TensorType compute_target(const TensorShape &shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift)
     {
         // Create tensors
-        TensorType src = create_tensor<TensorType>(shape, dt_in, 1, static_cast<int>(fixed_point_position));
-        TensorType dst = create_tensor<TensorType>(shape, dt_out, 1, static_cast<int>(fixed_point_position));
+        TensorType src = create_tensor<TensorType>(shape, dt_in, 1);
+        TensorType dst = create_tensor<TensorType>(shape, dt_out, 1);
 
         // Create and configure function
         FunctionType depth_convert;
@@ -89,10 +88,10 @@
         return dst;
     }
 
-    SimpleTensor<T2> compute_reference(const TensorShape &shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift, uint32_t fixed_point_position)
+    SimpleTensor<T2> compute_reference(const TensorShape &shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift)
     {
         // Create reference
-        SimpleTensor<T1> src{ shape, dt_in, 1, static_cast<int>(fixed_point_position) };
+        SimpleTensor<T1> src{ shape, dt_in, 1 };
 
         // Fill reference
         fill(src, 0);
@@ -102,7 +101,6 @@
 
     TensorType       _target{};
     SimpleTensor<T2> _reference{};
-    int              _fractional_bits{};
     int              _shift{};
 };
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
@@ -112,7 +110,7 @@
     template <typename...>
     void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift)
     {
-        DepthConvertLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, dt_in, dt_out, policy, shift, 0);
+        DepthConvertLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, dt_in, dt_out, policy, shift);
     }
 };
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
@@ -120,9 +118,9 @@
 {
 public:
     template <typename...>
-    void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t fractional_bits)
+    void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy)
     {
-        DepthConvertLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, dt_in, dt_out, policy, 0, fractional_bits);
+        DepthConvertLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, dt_in, dt_out, policy, 0);
     }
 };
 } // namespace validation
diff --git a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
index 2f01f43..5428154 100644
--- a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
@@ -115,10 +115,10 @@
         }
 
         // Create tensors
-        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, 0, quantization_info, data_layout);
-        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, 0, quantization_info, data_layout);
-        TensorType biases  = create_tensor<TensorType>(biases_shape, bias_data_type, 1, 0, quantization_info, data_layout);
-        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, 0, quantization_info, data_layout);
+        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info, data_layout);
+        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, quantization_info, data_layout);
+        TensorType biases  = create_tensor<TensorType>(biases_shape, bias_data_type, 1, quantization_info, data_layout);
+        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info, data_layout);
 
         // Create Depthwise Convolution configure function
         FunctionType dwc;
@@ -155,9 +155,9 @@
                                       unsigned int   depth_multiplier,
                                       const DataType data_type, const DataType bias_data_type, const QuantizationInfo quantization_info)
     {
-        SimpleTensor<T>     src{ in_shape, data_type, 1, 0, quantization_info };
-        SimpleTensor<T>     weights{ weights_shape, data_type, 1, 0, quantization_info };
-        SimpleTensor<TBias> biases{ biases_shape, bias_data_type, 1, 0, quantization_info };
+        SimpleTensor<T>     src{ in_shape, data_type, 1, quantization_info };
+        SimpleTensor<T>     weights{ weights_shape, data_type, 1, quantization_info };
+        SimpleTensor<TBias> biases{ biases_shape, bias_data_type, 1, quantization_info };
 
         fill(src, 0);
         fill(weights, 1);
diff --git a/tests/validation/fixtures/DirectConvolutionLayerFixture.h b/tests/validation/fixtures/DirectConvolutionLayerFixture.h
index 38ddf33..9a58167 100644
--- a/tests/validation/fixtures/DirectConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DirectConvolutionLayerFixture.h
@@ -54,11 +54,10 @@
 public:
     template <typename...>
     void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels,
-               DataType data_type, int fractional_bits, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout)
+               DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout)
     {
         ARM_COMPUTE_ERROR_ON(data_layout == DataLayout::UNKNOWN);
 
-        _fractional_bits   = fractional_bits;
         _quantization_info = quantization_info;
         _data_type         = data_type;
 
@@ -67,30 +66,29 @@
         const PadStrideInfo info(stride_x, stride_y, pad_x, pad_y, DimensionRoundingType::FLOOR);
         const DataType      bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
 
-        TensorInfo input_info   = TensorInfo(input_shape, 1, data_type, _fractional_bits);
-        TensorInfo weights_info = TensorInfo(weights_shape, 1, data_type, _fractional_bits);
+        TensorInfo input_info   = TensorInfo(input_shape, 1, data_type);
+        TensorInfo weights_info = TensorInfo(weights_shape, 1, data_type);
 
         const TensorShape output_shape = compute_deep_convolution_shape(input_info, weights_info, info);
 
-        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info, act_info, data_layout);
-        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info, act_info);
+        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info, data_layout);
+        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info);
     }
 
     template <typename...>
     void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
-               DataType data_type, int fractional_bits, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout)
+               DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout)
     {
         ARM_COMPUTE_ERROR_ON(data_layout == DataLayout::UNKNOWN);
         ARM_COMPUTE_UNUSED(dilation);
 
-        _fractional_bits   = fractional_bits;
         _quantization_info = quantization_info;
         _data_type         = data_type;
 
         const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
 
-        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info, act_info, data_layout);
-        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info, act_info);
+        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info, data_layout);
+        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info);
     }
 
 protected:
@@ -124,7 +122,7 @@
     }
 
     TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info,
-                              DataType data_type, DataType bias_data_type, int fixed_point_position, QuantizationInfo quantization_info, ActivationLayerInfo act_info, const DataLayout &data_layout)
+                              DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, const DataLayout &data_layout)
     {
         if(data_layout == DataLayout::NHWC)
         {
@@ -134,10 +132,10 @@
         }
 
         // Create tensors
-        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, fixed_point_position, quantization_info, data_layout);
-        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, fixed_point_position, quantization_info, data_layout);
-        TensorType bias    = create_tensor<TensorType>(bias_shape, bias_data_type, 1, fixed_point_position, quantization_info);
-        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, fixed_point_position, quantization_info, data_layout);
+        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info, data_layout);
+        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, quantization_info, data_layout);
+        TensorType bias    = create_tensor<TensorType>(bias_shape, bias_data_type, 1, quantization_info);
+        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info, data_layout);
 
         // Create and configure function
         FunctionType conv;
@@ -171,12 +169,12 @@
     }
 
     SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
-                                      DataType data_type, DataType bias_data_type, int fixed_point_position, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
+                                      DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
     {
         // Create reference
-        SimpleTensor<T>     src{ input_shape, data_type, 1, fixed_point_position, quantization_info };
-        SimpleTensor<T>     weights{ weights_shape, data_type, 1, fixed_point_position, quantization_info };
-        SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, fixed_point_position, quantization_info };
+        SimpleTensor<T>     src{ input_shape, data_type, 1, quantization_info };
+        SimpleTensor<T>     weights{ weights_shape, data_type, 1, quantization_info };
+        SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, quantization_info };
 
         // Fill reference
         fill(src, 0);
@@ -190,7 +188,6 @@
 
     TensorType       _target{};
     SimpleTensor<T>  _reference{};
-    int              _fractional_bits{};
     QuantizationInfo _quantization_info{};
     DataType         _data_type{};
 };
@@ -203,7 +200,7 @@
     void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, ActivationLayerInfo act_info,
                DataLayout data_layout)
     {
-        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, 0, QuantizationInfo(),
+        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, QuantizationInfo(),
                                                                                                     act_info, data_layout);
     }
 };
@@ -213,10 +210,10 @@
 {
 public:
     template <typename...>
-    void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, int fractional_bits,
+    void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type,
                ActivationLayerInfo act_info)
     {
-        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, fractional_bits,
+        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type,
                                                                                                     QuantizationInfo(), act_info, DataLayout::NCHW);
     }
 };
@@ -229,7 +226,7 @@
     void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, QuantizationInfo quantization_info,
                ActivationLayerInfo act_info)
     {
-        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, 0, quantization_info,
+        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, quantization_info,
                                                                                                     act_info, DataLayout::NCHW);
     }
 };
@@ -242,7 +239,7 @@
     void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
                DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
     {
-        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, 0, quantization_info,
+        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, quantization_info,
                                                                                                     act_info, DataLayout::NCHW);
     }
 };
@@ -255,7 +252,7 @@
     void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
                DataType data_type, ActivationLayerInfo act_info)
     {
-        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, 0, QuantizationInfo(),
+        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, QuantizationInfo(),
                                                                                                     act_info, DataLayout::NCHW);
     }
 };
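
Context for the trimmed calls above: the create_tensor helper used throughout these fixtures loses its fixed_point_position parameter in the same series. A sketch of the post-patch shape (the authoritative declaration lives in tests/Utils.h):

    // Sketch only -- post-patch helper signature, fixed_point_position removed.
    template <typename T>
    inline T create_tensor(const TensorShape &shape, DataType data_type, int num_channels = 1,
                           QuantizationInfo quantization_info = QuantizationInfo(),
                           DataLayout data_layout = DataLayout::NCHW);

    // The defaulted trailing parameters are what allow both the fully explicit
    // calls above and short forms such as create_tensor<CLTensor>(shape, DataType::F32).
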
diff --git a/tests/validation/fixtures/DirectConvolutionLayerTensorShiftFixture.h b/tests/validation/fixtures/DirectConvolutionLayerTensorShiftFixture.h
index 09b6d83..144c7b7 100644
--- a/tests/validation/fixtures/DirectConvolutionLayerTensorShiftFixture.h
+++ b/tests/validation/fixtures/DirectConvolutionLayerTensorShiftFixture.h
@@ -50,9 +50,8 @@
 public:
     template <typename...>
     void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels,
-               DataType data_type, int fractional_bits, QuantizationInfo quantization_info)
+               DataType data_type, QuantizationInfo quantization_info)
     {
-        _fractional_bits   = fractional_bits;
         _quantization_info = quantization_info;
         _data_type         = data_type;
 
@@ -62,24 +61,23 @@
         const TensorShape   output_shape   = get_output_shape(input_shape, weights_shape, info);
         const DataType      bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
 
-        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info);
-        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info);
+        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info);
+        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info);
     }
 
     template <typename...>
     void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, unsigned int dilation_x, unsigned int dilation_y,
-               DataType data_type, int fractional_bits, QuantizationInfo quantization_info)
+               DataType data_type, QuantizationInfo quantization_info)
     {
         ARM_COMPUTE_UNUSED(dilation_x, dilation_y);
 
-        _fractional_bits   = fractional_bits;
         _quantization_info = quantization_info;
         _data_type         = data_type;
 
         const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
 
-        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info);
-        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info);
+        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info);
+        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info);
     }
 
 protected:
@@ -113,16 +111,16 @@
     }
 
     TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
-                              DataType data_type, DataType bias_data_type, int fixed_point_position, QuantizationInfo quantization_info)
+                              DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info)
     {
         // Create tensors
-        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, fixed_point_position, quantization_info);
-        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, fixed_point_position, quantization_info);
-        TensorType bias    = create_tensor<TensorType>(bias_shape, bias_data_type, 1, fixed_point_position, quantization_info);
-        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, fixed_point_position, quantization_info);
+        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info);
+        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, quantization_info);
+        TensorType bias    = create_tensor<TensorType>(bias_shape, bias_data_type, 1, quantization_info);
+        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info);
 
         TensorShape output_shape1 = get_output_shape(output_shape, weights_shape, info);
-        TensorType  dst1          = create_tensor<TensorType>(output_shape1, data_type, 1, fixed_point_position, quantization_info);
+        TensorType  dst1          = create_tensor<TensorType>(output_shape1, data_type, 1, quantization_info);
 
         // Create and configure function
         FunctionType conv;
@@ -164,14 +162,14 @@
     }
 
     SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
-                                      DataType data_type, DataType bias_data_type, int fixed_point_position, QuantizationInfo quantization_info)
+                                      DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info)
     {
         // Create reference
-        SimpleTensor<T>     src{ input_shape, data_type, 1, fixed_point_position, quantization_info };
-        SimpleTensor<T>     weights{ weights_shape, data_type, 1, fixed_point_position, quantization_info };
-        SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, fixed_point_position, quantization_info };
+        SimpleTensor<T>     src{ input_shape, data_type, 1, quantization_info };
+        SimpleTensor<T>     weights{ weights_shape, data_type, 1, quantization_info };
+        SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, quantization_info };
 
-        SimpleTensor<T> dst{ output_shape, data_type, 1, fixed_point_position, quantization_info };
+        SimpleTensor<T> dst{ output_shape, data_type, 1, quantization_info };
         TensorShape     output_shape1 = get_output_shape(output_shape, weights_shape, info);
 
         // Fill reference
@@ -185,7 +183,6 @@
 
     TensorType       _target{};
     SimpleTensor<T>  _reference{};
-    int              _fractional_bits{};
     QuantizationInfo _quantization_info{};
     DataType         _data_type{};
 
@@ -212,7 +209,7 @@
     template <typename...>
     void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type)
     {
-        DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, 0,
+        DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type,
                                                                                                                QuantizationInfo());
     }
 };
@@ -222,10 +219,9 @@
 {
 public:
     template <typename...>
-    void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, int fractional_bits)
+    void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type)
     {
         DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type,
-                                                                                                               fractional_bits,
                                                                                                                QuantizationInfo());
     }
 };
@@ -237,7 +233,7 @@
     template <typename...>
     void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, QuantizationInfo quantization_info)
     {
-        DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, 0,
+        DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type,
                                                                                                                quantization_info);
     }
 };
@@ -250,7 +246,7 @@
     void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, unsigned int dilation_x, unsigned int dilation_y,
                DataType data_type, QuantizationInfo quantization_info)
     {
-        DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation_x, dilation_y, data_type, 0,
+        DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation_x, dilation_y, data_type,
                                                                                                                quantization_info);
     }
 };
@@ -263,7 +259,7 @@
     void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, unsigned int dilation_x, unsigned int dilation_y,
                DataType data_type)
     {
-        DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation_x, dilation_y, data_type, 0,
+        DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation_x, dilation_y, data_type,
                                                                                                                QuantizationInfo());
     }
 };
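
The reference-side counterpart follows the same pattern: the SimpleTensor constructor invoked by the brace-initialisations in these hunks now takes, approximately (a sketch; see tests/SimpleTensor.h for the authoritative form):

    // Sketch of the trimmed reference-tensor constructor.
    SimpleTensor(TensorShape shape, DataType data_type, int num_channels = 1,
                 QuantizationInfo quantization_info = QuantizationInfo(),
                 DataLayout data_layout = DataLayout::NCHW);
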
diff --git a/tests/validation/fixtures/DropoutLayerFixture.h b/tests/validation/fixtures/DropoutLayerFixture.h
index 3a077db..771de30 100644
--- a/tests/validation/fixtures/DropoutLayerFixture.h
+++ b/tests/validation/fixtures/DropoutLayerFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -97,7 +97,6 @@
 
     TensorType      _target{};
     SimpleTensor<T> _reference{};
-    int             _fractional_bits{};
 };
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/fixtures/FlattenLayerFixture.h b/tests/validation/fixtures/FlattenLayerFixture.h
index ef94ea8..f273e93 100644
--- a/tests/validation/fixtures/FlattenLayerFixture.h
+++ b/tests/validation/fixtures/FlattenLayerFixture.h
@@ -53,10 +53,8 @@
     template <typename...>
     void setup(TensorShape shape, DataType data_type)
     {
-        _fractional_bits = is_data_type_fixed_point(data_type) ? 4 : 0;
-
         TensorShape shape_flatten;
-        TensorInfo  input_info(shape, 1, data_type, _fractional_bits);
+        TensorInfo  input_info(shape, 1, data_type);
         shape_flatten = compute_im2col_flatten_shape(&input_info);
 
         _target    = compute_target(shape, shape_flatten, data_type);
@@ -68,24 +66,15 @@
     template <typename U>
     void fill(U &&tensor)
     {
-        if(_fractional_bits == 0)
-        {
-            std::uniform_real_distribution<> distribution(-1.f, 1.f);
-            library->fill(tensor, distribution, 0);
-        }
-        else
-        {
-            const int                       one_fixed = 1 << _fractional_bits;
-            std::uniform_int_distribution<> distribution(-one_fixed, one_fixed);
-            library->fill(tensor, distribution, 0);
-        }
+        std::uniform_real_distribution<> distribution(-1.f, 1.f);
+        library->fill(tensor, distribution, 0);
     }
 
     TensorType compute_target(const TensorShape &shape, const TensorShape &shape_flatten, DataType data_type)
     {
         // Create tensors
-        TensorType src = create_tensor<TensorType>(shape, data_type, 1, _fractional_bits);
-        TensorType dst = create_tensor<TensorType>(shape_flatten, data_type, 1, _fractional_bits);
+        TensorType src = create_tensor<TensorType>(shape, data_type, 1);
+        TensorType dst = create_tensor<TensorType>(shape_flatten, data_type, 1);
 
         // Create and configure function
         FunctionType flatten_layer;
@@ -113,7 +102,7 @@
     SimpleTensor<T> compute_reference(const TensorShape &shape, const TensorShape &shape_flatten, DataType data_type)
     {
         // Create reference
-        SimpleTensor<T> src{ shape, data_type, 1, _fractional_bits };
+        SimpleTensor<T> src{ shape, data_type, 1 };
 
         // Fill reference
         fill(src);
@@ -123,7 +112,6 @@
 
     TensorType      _target{};
     SimpleTensor<T> _reference{};
-    int             _fractional_bits{};
 };
 } // namespace validation
 } // namespace test
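
The flatten fixture also exercises the core TensorInfo constructor; with fixed point gone, the three-argument form used above reduces to (a sketch of the post-patch core API):

    TensorInfo(const TensorShape &tensor_shape, size_t num_channels, DataType data_type);

so compute_im2col_flatten_shape() receives an info object with no fixed-point state left to propagate.
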
diff --git a/tests/validation/fixtures/FullyConnectedLayerFixture.h b/tests/validation/fixtures/FullyConnectedLayerFixture.h
index f23fc20..895e43b 100644
--- a/tests/validation/fixtures/FullyConnectedLayerFixture.h
+++ b/tests/validation/fixtures/FullyConnectedLayerFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -54,14 +54,13 @@
 public:
     template <typename...>
     void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights,
-               DataType data_type, int fractional_bits, QuantizationInfo quantization_info)
+               DataType data_type, QuantizationInfo quantization_info)
     {
         ARM_COMPUTE_UNUSED(weights_shape);
         ARM_COMPUTE_UNUSED(bias_shape);
 
         _data_type         = data_type;
         _bias_data_type    = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
-        _fractional_bits   = fractional_bits;
         _quantization_info = quantization_info;
 
         _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, transpose_weights, reshape_weights);
@@ -126,10 +125,10 @@
         }
 
         // Create tensors
-        TensorType src     = create_tensor<TensorType>(input_shape, _data_type, 1, _fractional_bits, _quantization_info);
-        TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _fractional_bits, _quantization_info);
-        TensorType bias    = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _fractional_bits, _quantization_info);
-        TensorType dst     = create_tensor<TensorType>(output_shape, _data_type, 1, _fractional_bits, _quantization_info);
+        TensorType src     = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info);
+        TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _quantization_info);
+        TensorType bias    = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info);
+        TensorType dst     = create_tensor<TensorType>(output_shape, _data_type, 1, _quantization_info);
 
         // Create and configure function.
         FunctionType fc;
@@ -158,7 +157,7 @@
         if(!reshape_weights || !transpose_weights)
         {
             TensorShape tmp_shape(weights_shape);
-            RawTensor   tmp(tmp_shape, _data_type, 1, _fractional_bits);
+            RawTensor   tmp(tmp_shape, _data_type, 1);
 
             // Fill with original shape
             fill(tmp, 1);
@@ -199,9 +198,9 @@
                                       bool reshape_weights)
     {
         // Create reference
-        SimpleTensor<T>     src{ input_shape, _data_type, 1, _fractional_bits, _quantization_info };
-        SimpleTensor<T>     weights{ weights_shape, _data_type, 1, _fractional_bits, _quantization_info };
-        SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _fractional_bits, _quantization_info };
+        SimpleTensor<T>     src{ input_shape, _data_type, 1, _quantization_info };
+        SimpleTensor<T>     weights{ weights_shape, _data_type, 1, _quantization_info };
+        SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _quantization_info };
 
         // Fill reference
         fill(src, 0);
@@ -215,7 +214,6 @@
     SimpleTensor<T>  _reference{};
     DataType         _data_type{};
     DataType         _bias_data_type{};
-    int              _fractional_bits{};
     QuantizationInfo _quantization_info{};
 };
 
@@ -228,7 +226,7 @@
     {
         FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, run_interleave>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights,
                                                                                                                       reshape_weights, data_type,
-                                                                                                                      0, QuantizationInfo());
+                                                                                                                      QuantizationInfo());
     }
 };
 
@@ -237,11 +235,11 @@
 {
 public:
     template <typename...>
-    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights, DataType data_type, int fractional_bits)
+    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights, DataType data_type)
     {
         FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, run_interleave>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights,
                                                                                                                       reshape_weights, data_type,
-                                                                                                                      fractional_bits, QuantizationInfo());
+                                                                                                                      QuantizationInfo());
     }
 };
 
@@ -255,7 +253,7 @@
     {
         FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, run_interleave>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights,
                                                                                                                       reshape_weights, data_type,
-                                                                                                                      0, quantization_info);
+                                                                                                                      quantization_info);
     }
 };
 } // namespace validation
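
One invariant these fully connected hunks leave untouched: bias type selection still follows the quantized-GEMM convention, independent of the fixed-point removal:

    // QASYMM8 inputs keep S32 (accumulator-domain) biases; float paths reuse the input type.
    const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
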
diff --git a/tests/validation/fixtures/GEMMFixture.h b/tests/validation/fixtures/GEMMFixture.h
index 8dd2998..e4762cc 100644
--- a/tests/validation/fixtures/GEMMFixture.h
+++ b/tests/validation/fixtures/GEMMFixture.h
@@ -47,13 +47,12 @@
 {
 public:
     template <typename...>
-    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape output_shape, float alpha, float beta, DataType data_type, int fractional_bits)
+    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape output_shape, float alpha, float beta, DataType data_type)
     {
-        _fractional_bits = fractional_bits;
-        _data_type       = data_type;
+        _data_type = data_type;
 
-        _target    = compute_target(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type, fractional_bits);
-        _reference = compute_reference(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type, fractional_bits);
+        _target    = compute_target(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type);
+        _reference = compute_reference(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type);
     }
 
 protected:
@@ -75,13 +74,13 @@
     }
 
     TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c, const TensorShape &output_shape, float alpha, float beta,
-                              DataType data_type, int fixed_point_position)
+                              DataType data_type)
     {
         // Create tensors
-        TensorType a   = create_tensor<TensorType>(shape_a, data_type, 1, fixed_point_position);
-        TensorType b   = create_tensor<TensorType>(shape_b, data_type, 1, fixed_point_position);
-        TensorType c   = create_tensor<TensorType>(shape_c, data_type, 1, fixed_point_position);
-        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, fixed_point_position);
+        TensorType a   = create_tensor<TensorType>(shape_a, data_type, 1);
+        TensorType b   = create_tensor<TensorType>(shape_b, data_type, 1);
+        TensorType c   = create_tensor<TensorType>(shape_c, data_type, 1);
+        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1);
 
         // Create and configure function
         FunctionType gemm;
@@ -120,12 +119,12 @@
     }
 
     SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c, const TensorShape &output_shape, float alpha, float beta,
-                                      DataType data_type, int fixed_point_position)
+                                      DataType data_type)
     {
         // Create reference
-        SimpleTensor<T> a{ shape_a, data_type, 1, fixed_point_position };
-        SimpleTensor<T> b{ shape_b, data_type, 1, fixed_point_position };
-        SimpleTensor<T> c{ shape_c, data_type, 1, fixed_point_position };
+        SimpleTensor<T> a{ shape_a, data_type, 1 };
+        SimpleTensor<T> b{ shape_b, data_type, 1 };
+        SimpleTensor<T> c{ shape_c, data_type, 1 };
 
         // Fill reference
         fill(a, 0);
@@ -137,7 +136,6 @@
 
     TensorType      _target{};
     SimpleTensor<T> _reference{};
-    int             _fractional_bits{};
     DataType        _data_type{};
 };
 
@@ -148,7 +146,7 @@
     template <typename...>
     void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape output_shape, float alpha, float beta, DataType data_type)
     {
-        GEMMValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type, 0);
+        GEMMValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type);
     }
 };
 } // namespace validation
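
A caller-side sketch of the trimmed GEMM fixture (illustrative only; the shapes follow the width-by-height convention of the small GEMM datasets, and the NEON types are one possible instantiation):

    GEMMValidationFixture<Tensor, Accessor, NEGEMM, float> fixture;
    fixture.setup(TensorShape(21U, 13U), // a: 21 wide (K) x 13 tall (M)
                  TensorShape(33U, 21U), // b: 33 wide (N) x 21 tall (K)
                  TensorShape(33U, 13U), // c
                  TensorShape(33U, 13U), // dst
                  1.0f, 0.0f,            // alpha, beta
                  DataType::F32);        // no trailing fractional_bits any more
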
diff --git a/tests/validation/fixtures/GEMMInterleave4x4Fixture.h b/tests/validation/fixtures/GEMMInterleave4x4Fixture.h
index 1f0a742..9ad730c 100644
--- a/tests/validation/fixtures/GEMMInterleave4x4Fixture.h
+++ b/tests/validation/fixtures/GEMMInterleave4x4Fixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -47,14 +47,13 @@
 {
 public:
     template <typename...>
-    void setup(size_t x, size_t y, DataType data_type, int fractional_bits)
+    void setup(size_t x, size_t y, DataType data_type)
     {
-        _fractional_bits = fractional_bits;
-        _data_type       = data_type;
+        _data_type = data_type;
         const TensorShape shape_a(x, y);
         const TensorShape shape_b(static_cast<size_t>(x * 4.f), static_cast<size_t>(std::ceil(y / 4.f)));
-        _target    = compute_target(shape_a, shape_b, data_type, fractional_bits);
-        _reference = compute_reference(shape_a, shape_b, data_type, fractional_bits);
+        _target    = compute_target(shape_a, shape_b, data_type);
+        _reference = compute_reference(shape_a, shape_b, data_type);
     }
 
 protected:
@@ -76,11 +75,11 @@
         }
     }
 
-    TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type, int fixed_point_position)
+    TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type)
     {
         // Create tensors
-        TensorType a = create_tensor<TensorType>(shape_a, data_type, 1, fixed_point_position);
-        TensorType b = create_tensor<TensorType>(shape_b, data_type, 1, fixed_point_position);
+        TensorType a = create_tensor<TensorType>(shape_a, data_type, 1);
+        TensorType b = create_tensor<TensorType>(shape_b, data_type, 1);
 
         // Create and configure function
         FunctionType f;
@@ -105,11 +104,11 @@
         return b;
     }
 
-    SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type, int fixed_point_position)
+    SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type)
     {
         // Create reference
-        SimpleTensor<T> a{ shape_a, data_type, 1, fixed_point_position };
-        SimpleTensor<T> b{ shape_b, data_type, 1, fixed_point_position };
+        SimpleTensor<T> a{ shape_a, data_type, 1 };
+        SimpleTensor<T> b{ shape_b, data_type, 1 };
 
         // Fill reference
         fill(a, 0);
@@ -120,7 +119,6 @@
 
     TensorType      _target{};
     SimpleTensor<T> _reference{};
-    int             _fractional_bits{};
     DataType        _data_type{};
 };
 
@@ -131,7 +129,7 @@
     template <typename...>
     void setup(size_t x, size_t y, DataType data_type)
     {
-        GEMMInterleave4x4ValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(x, y, data_type, 0);
+        GEMMInterleave4x4ValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(x, y, data_type);
     }
 };
 
diff --git a/tests/validation/fixtures/GEMMTranspose1xWFixture.h b/tests/validation/fixtures/GEMMTranspose1xWFixture.h
index d83d5e9..48fa55e 100644
--- a/tests/validation/fixtures/GEMMTranspose1xWFixture.h
+++ b/tests/validation/fixtures/GEMMTranspose1xWFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2018 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -47,15 +47,14 @@
 {
 public:
     template <typename...>
-    void setup(size_t x, size_t y, DataType data_type, int fractional_bits)
+    void setup(size_t x, size_t y, DataType data_type)
     {
-        _fractional_bits = fractional_bits;
-        _data_type       = data_type;
+        _data_type = data_type;
         const TensorShape  shape_a(x, y);
         const unsigned int transpose_w = 16 / data_size_from_type(data_type);
         const TensorShape  shape_b(static_cast<size_t>(y * transpose_w), static_cast<size_t>(std::ceil(x / static_cast<float>(transpose_w))));
-        _target    = compute_target(shape_a, shape_b, data_type, fractional_bits);
-        _reference = compute_reference(shape_a, shape_b, data_type, fractional_bits);
+        _target    = compute_target(shape_a, shape_b, data_type);
+        _reference = compute_reference(shape_a, shape_b, data_type);
     }
 
 protected:
@@ -77,11 +76,11 @@
         }
     }
 
-    TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type, int fixed_point_position)
+    TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type)
     {
         // Create tensors
-        TensorType a = create_tensor<TensorType>(shape_a, data_type, 1, fixed_point_position);
-        TensorType b = create_tensor<TensorType>(shape_b, data_type, 1, fixed_point_position);
+        TensorType a = create_tensor<TensorType>(shape_a, data_type, 1);
+        TensorType b = create_tensor<TensorType>(shape_b, data_type, 1);
 
         // Create and configure function
         FunctionType f;
@@ -107,10 +106,10 @@
         return b;
     }
 
-    SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type, int fixed_point_position)
+    SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type)
     {
         // Create reference
-        SimpleTensor<T> a{ shape_a, data_type, 1, fixed_point_position };
+        SimpleTensor<T> a{ shape_a, data_type, 1 };
 
         // Fill reference
         fill(a, 0);
@@ -120,7 +119,6 @@
 
     TensorType      _target{};
     SimpleTensor<T> _reference{};
-    int             _fractional_bits{};
     DataType        _data_type{};
 };
 
@@ -131,7 +129,7 @@
     template <typename...>
     void setup(size_t x, size_t y, DataType data_type)
     {
-        GEMMTranspose1xWValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(x, y, data_type, 0);
+        GEMMTranspose1xWValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(x, y, data_type);
     }
 };
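
The transpose-width arithmetic in this fixture is untouched but worth a worked example, since it determines shape_b above: transpose_w = 16 / data_size_from_type(data_type), i.e. how many elements pack into one 16-byte chunk:

    // F32: 16 / 4 = 4  -> shape_b = (y * 4,  ceil(x / 4))
    // F16: 16 / 2 = 8  -> shape_b = (y * 8,  ceil(x / 8))
    // U8:  16 / 1 = 16 -> shape_b = (y * 16, ceil(x / 16))
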
 
diff --git a/tests/validation/fixtures/Im2ColFixture.h b/tests/validation/fixtures/Im2ColFixture.h
index 6e532e7..6abea27 100644
--- a/tests/validation/fixtures/Im2ColFixture.h
+++ b/tests/validation/fixtures/Im2ColFixture.h
@@ -81,8 +81,8 @@
     TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, DataType data_type)
     {
         // Create tensors
-        TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, _quant_info, _data_layout);
-        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, _quant_info);
+        TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, _quant_info, _data_layout);
+        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, _quant_info);
 
         // Create and configure function
         FunctionType im2col_func;
@@ -110,8 +110,8 @@
     void compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, DataType data_type)
     {
         // Create reference
-        SimpleTensor<T> src{ input_shape, data_type, 1, 0, _quant_info, _data_layout };
-        _reference = SimpleTensor<T>(output_shape, data_type, 1, 0, _quant_info, DataLayout::NCHW);
+        SimpleTensor<T> src{ input_shape, data_type, 1, _quant_info, _data_layout };
+        _reference = SimpleTensor<T>(output_shape, data_type, 1, _quant_info, DataLayout::NCHW);
         // Fill reference
         fill(src);
         reference::im2col<T>(src, _reference, _kernel_dims, _conv_info, _has_bias);
diff --git a/tests/validation/fixtures/NormalizationLayerFixture.h b/tests/validation/fixtures/NormalizationLayerFixture.h
index e7d83c7..f4f9c64 100644
--- a/tests/validation/fixtures/NormalizationLayerFixture.h
+++ b/tests/validation/fixtures/NormalizationLayerFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -43,40 +43,30 @@
 namespace validation
 {
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class NormalizationValidationFixedPointFixture : public framework::Fixture
+class NormalizationValidationGenericFixture : public framework::Fixture
 {
 public:
     template <typename...>
-    void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, bool is_scaled, DataType data_type, int fractional_bits)
+    void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, bool is_scaled, DataType data_type)
     {
-        _fractional_bits = fractional_bits;
         NormalizationLayerInfo info(norm_type, norm_size, 5, beta, 1.f, is_scaled);
 
-        _target    = compute_target(shape, info, data_type, fractional_bits);
-        _reference = compute_reference(shape, info, data_type, fractional_bits);
+        _target    = compute_target(shape, info, data_type);
+        _reference = compute_reference(shape, info, data_type);
     }
 
 protected:
     template <typename U>
     void fill(U &&tensor)
     {
-        if(_fractional_bits == 0)
-        {
-            library->fill_tensor_uniform(tensor, 0);
-        }
-        else
-        {
-            const int                       one_fixed = 1 << _fractional_bits;
-            std::uniform_int_distribution<> distribution(-one_fixed, one_fixed);
-            library->fill(tensor, distribution, 0);
-        }
+        library->fill_tensor_uniform(tensor, 0);
     }
 
-    TensorType compute_target(const TensorShape &shape, NormalizationLayerInfo info, DataType data_type, int fixed_point_position = 0)
+    TensorType compute_target(const TensorShape &shape, NormalizationLayerInfo info, DataType data_type)
     {
         // Create tensors
-        TensorType src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position);
-        TensorType dst = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position);
+        TensorType src = create_tensor<TensorType>(shape, data_type, 1);
+        TensorType dst = create_tensor<TensorType>(shape, data_type, 1);
 
         // Create and configure function
         FunctionType norm_layer;
@@ -101,10 +91,10 @@
         return dst;
     }
 
-    SimpleTensor<T> compute_reference(const TensorShape &shape, NormalizationLayerInfo info, DataType data_type, int fixed_point_position = 0)
+    SimpleTensor<T> compute_reference(const TensorShape &shape, NormalizationLayerInfo info, DataType data_type)
     {
         // Create reference
-        SimpleTensor<T> src{ shape, data_type, 1, fixed_point_position };
+        SimpleTensor<T> src{ shape, data_type, 1 };
 
         // Fill reference
         fill(src);
@@ -114,17 +104,16 @@
 
     TensorType      _target{};
     SimpleTensor<T> _reference{};
-    int             _fractional_bits{};
 };
 
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class NormalizationValidationFixture : public NormalizationValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>
+class NormalizationValidationFixture : public NormalizationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
 {
 public:
     template <typename...>
     void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, bool is_scaled, DataType data_type)
     {
-        NormalizationValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, norm_type, norm_size, beta, is_scaled, data_type, 0);
+        NormalizationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, norm_type, norm_size, beta, is_scaled, data_type);
     }
 };
 } // namespace validation
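
Since only the base class is renamed here (FixedPoint -> Generic) and NormalizationValidationFixture keeps its public name and arity, existing suite-side aliases keep compiling unchanged, e.g. (illustrative):

    template <typename T>
    using NENormalizationLayerFixture = NormalizationValidationFixture<Tensor, Accessor, NENormalizationLayer, T>;
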
diff --git a/tests/validation/fixtures/PoolingLayerFixture.h b/tests/validation/fixtures/PoolingLayerFixture.h
index 27b033a..2453954 100644
--- a/tests/validation/fixtures/PoolingLayerFixture.h
+++ b/tests/validation/fixtures/PoolingLayerFixture.h
@@ -47,14 +47,13 @@
 {
 public:
     template <typename...>
-    void setup(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type, DataLayout data_layout, int fractional_bits, QuantizationInfo quantization_info)
+    void setup(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info)
     {
-        _fractional_bits   = fractional_bits;
         _quantization_info = quantization_info;
         _pool_info         = pool_info;
 
-        _target    = compute_target(shape, pool_info, data_type, data_layout, fractional_bits, quantization_info);
-        _reference = compute_reference(shape, pool_info, data_type, fractional_bits, quantization_info);
+        _target    = compute_target(shape, pool_info, data_type, data_layout, quantization_info);
+        _reference = compute_reference(shape, pool_info, data_type, quantization_info);
     }
 
 protected:
@@ -72,14 +71,14 @@
         }
         else
         {
-            const int                       one_fixed = 1 << _fractional_bits;
+            const int                       one_fixed = 1;
             std::uniform_int_distribution<> distribution(-one_fixed, one_fixed);
             library->fill(tensor, distribution, 0);
         }
     }
 
     TensorType compute_target(TensorShape shape, PoolingLayerInfo info,
-                              DataType data_type, DataLayout data_layout, int fixed_point_position, QuantizationInfo quantization_info)
+                              DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info)
     {
         // Change shape in case of NHWC.
         if(data_layout == DataLayout::NHWC)
@@ -88,7 +87,7 @@
         }
 
         // Create tensors
-        TensorType src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, quantization_info, data_layout);
+        TensorType src = create_tensor<TensorType>(shape, data_type, 1, quantization_info, data_layout);
         TensorType dst;
 
         // Create and configure function
@@ -115,10 +114,10 @@
     }
 
     SimpleTensor<T> compute_reference(const TensorShape &shape, PoolingLayerInfo info,
-                                      DataType data_type, int fixed_point_position, QuantizationInfo quantization_info)
+                                      DataType data_type, QuantizationInfo quantization_info)
     {
         // Create reference
-        SimpleTensor<T> src{ shape, data_type, 1, fixed_point_position, quantization_info };
+        SimpleTensor<T> src{ shape, data_type, 1, quantization_info };
 
         // Fill reference
         fill(src);
@@ -128,7 +127,6 @@
 
     TensorType       _target{};
     SimpleTensor<T>  _reference{};
-    int              _fractional_bits{};
     QuantizationInfo _quantization_info{};
     PoolingLayerInfo _pool_info{};
 };
@@ -141,7 +139,7 @@
     void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, DataLayout data_layout)
     {
         PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, pad_stride_info, exclude_padding),
-                                                                                               data_type, data_layout, 0, QuantizationInfo());
+                                                                                               data_type, data_layout, QuantizationInfo());
     }
 };
 
@@ -150,10 +148,10 @@
 {
 public:
     template <typename...>
-    void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, int fractional_bits)
+    void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type)
     {
         PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, pad_stride_info, exclude_padding),
-                                                                                               data_type, DataLayout::NCHW, fractional_bits, QuantizationInfo());
+                                                                                               data_type, DataLayout::NCHW, QuantizationInfo());
     }
 };
 
@@ -166,7 +164,7 @@
                QuantizationInfo quantization_info, DataLayout data_layout = DataLayout::NCHW)
     {
         PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, pad_stride_info, exclude_padding),
-                                                                                               data_type, data_layout, 0, quantization_info);
+                                                                                               data_type, data_layout, quantization_info);
     }
 };
 
@@ -177,7 +175,7 @@
     template <typename...>
     void setup(TensorShape src_shape, PoolingLayerInfo pool_info, DataType data_type)
     {
-        PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, pool_info, data_type, DataLayout::NCHW, 0, QuantizationInfo());
+        PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, pool_info, data_type, DataLayout::NCHW, QuantizationInfo());
     }
 };
 
@@ -188,7 +186,7 @@
     template <typename...>
     void setup(TensorShape shape, PoolingType pool_type, DataType data_type, DataLayout data_layout = DataLayout::NCHW)
     {
-        PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type), data_type, DataLayout::NCHW, 0, QuantizationInfo());
+        PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type), data_type, DataLayout::NCHW, QuantizationInfo());
     }
 };
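
One leftover in the pooling fill() above: with fractional bits gone, one_fixed is pinned to 1, so the non-float branch now simply draws from {-1, 0, 1}. An equivalent, self-describing form (a suggestion only, not part of this patch; the same pattern survives in SoftmaxLayerFixture.h below):

    // Non-float path: small integer stimulus in [-1, 1].
    std::uniform_int_distribution<> distribution(-1, 1);
    library->fill(tensor, distribution, 0);
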
 
diff --git a/tests/validation/fixtures/ScaleFixture.h b/tests/validation/fixtures/ScaleFixture.h
index 05e1bf5..5413147 100644
--- a/tests/validation/fixtures/ScaleFixture.h
+++ b/tests/validation/fixtures/ScaleFixture.h
@@ -100,7 +100,7 @@
         }
 
         // Create tensors
-        TensorType src = create_tensor<TensorType>(shape, _data_type, 1, 0, QuantizationInfo(), data_layout);
+        TensorType src = create_tensor<TensorType>(shape, _data_type, 1, QuantizationInfo(), data_layout);
 
         const int idx_width  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
         const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
@@ -108,7 +108,7 @@
         TensorShape shape_scaled(shape);
         shape_scaled.set(idx_width, shape[idx_width] * scale_x);
         shape_scaled.set(idx_height, shape[idx_height] * scale_y);
-        TensorType dst = create_tensor<TensorType>(shape_scaled, _data_type, 1, 0, QuantizationInfo(), data_layout);
+        TensorType dst = create_tensor<TensorType>(shape_scaled, _data_type, 1, QuantizationInfo(), data_layout);
 
         // Create and configure function
         FunctionType scale;
@@ -137,7 +137,7 @@
                                       InterpolationPolicy policy, BorderMode border_mode, T constant_border_value, SamplingPolicy sampling_policy)
     {
         // Create reference
-        SimpleTensor<T> src{ shape, _data_type, 1, 0, QuantizationInfo() };
+        SimpleTensor<T> src{ shape, _data_type, 1, QuantizationInfo() };
 
         // Fill reference
         fill(src);
diff --git a/tests/validation/fixtures/SoftmaxLayerFixture.h b/tests/validation/fixtures/SoftmaxLayerFixture.h
index c2ab2e2..59ce519 100644
--- a/tests/validation/fixtures/SoftmaxLayerFixture.h
+++ b/tests/validation/fixtures/SoftmaxLayerFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -47,13 +47,12 @@
 {
 public:
     template <typename...>
-    void setup(TensorShape shape, DataType data_type, int fractional_bits, QuantizationInfo quantization_info, float beta)
+    void setup(TensorShape shape, DataType data_type, QuantizationInfo quantization_info, float beta)
     {
-        _fractional_bits   = fractional_bits;
         _quantization_info = quantization_info;
 
-        _target    = compute_target(shape, data_type, fractional_bits, quantization_info, beta);
-        _reference = compute_reference(shape, data_type, fractional_bits, quantization_info, beta);
+        _target    = compute_target(shape, data_type, quantization_info, beta);
+        _reference = compute_reference(shape, data_type, quantization_info, beta);
     }
 
 protected:
@@ -72,18 +71,18 @@
         }
         else
         {
-            const int                       one_fixed = 1 << _fractional_bits;
+            const int                       one_fixed = 1;
             std::uniform_int_distribution<> distribution(-one_fixed, one_fixed);
             library->fill(tensor, distribution, 0);
         }
     }
 
-    TensorType compute_target(const TensorShape &shape, DataType data_type, int fixed_point_position,
+    TensorType compute_target(const TensorShape &shape, DataType data_type,
                               QuantizationInfo quantization_info, float beta)
     {
         // Create tensors
-        TensorType src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, quantization_info);
-        TensorType dst = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, QuantizationInfo(1.f / 256, 0));
+        TensorType src = create_tensor<TensorType>(shape, data_type, 1, quantization_info);
+        TensorType dst = create_tensor<TensorType>(shape, data_type, 1, QuantizationInfo(1.f / 256, 0));
 
         // Create and configure function
         FunctionType smx_layer;
@@ -108,11 +107,11 @@
         return dst;
     }
 
-    SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type, int fixed_point_position,
+    SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type,
                                       QuantizationInfo quantization_info, float beta)
     {
         // Create reference
-        SimpleTensor<T> src{ shape, data_type, 1, fixed_point_position, quantization_info };
+        SimpleTensor<T> src{ shape, data_type, 1, quantization_info };
 
         // Fill reference
         fill(src);
@@ -122,7 +121,6 @@
 
     TensorType       _target{};
     SimpleTensor<T>  _reference{};
-    int              _fractional_bits{};
     QuantizationInfo _quantization_info{};
 };
 
@@ -135,7 +133,6 @@
     {
         SoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape,
                                                                                           data_type,
-                                                                                          0,
                                                                                           QuantizationInfo(),
                                                                                           beta);
     }
@@ -146,11 +143,10 @@
 {
 public:
     template <typename...>
-    void setup(TensorShape shape, DataType data_type, int fixed_point_position)
+    void setup(TensorShape shape, DataType data_type)
     {
         SoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape,
                                                                                           data_type,
-                                                                                          fixed_point_position,
                                                                                           QuantizationInfo(),
                                                                                           1.0f);
     }
@@ -165,7 +161,6 @@
     {
         SoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape,
                                                                                           data_type,
-                                                                                          0,
                                                                                           quantization_info,
                                                                                           beta);
     }
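
On the hard-coded QuantizationInfo(1.f / 256, 0) for the softmax destination: softmax outputs lie in [0, 1], and scale 1/256 with zero point 0 spans that range over the full uint8 grid. A quick check (hypothetical helper, plain arithmetic):

    #include <cstdint>
    // q -> (q - zero_point) * scale, with zero_point = 0 and scale = 1/256.
    inline float dequantize_softmax(std::uint8_t q) { return (q - 0) * (1.f / 256); }
    // dequantize_softmax(0)   == 0.0f         (softmax lower bound)
    // dequantize_softmax(255) == 0.99609375f  (just below the 1.0 upper bound)
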
diff --git a/tests/validation/fixtures/WidthConcatenateLayerFixture.h b/tests/validation/fixtures/WidthConcatenateLayerFixture.h
index cf9b12e..caad0fe 100644
--- a/tests/validation/fixtures/WidthConcatenateLayerFixture.h
+++ b/tests/validation/fixtures/WidthConcatenateLayerFixture.h
@@ -92,12 +92,12 @@
 
         for(const auto &shape : shapes)
         {
-            srcs.emplace_back(create_tensor<TensorType>(shape, data_type, 1, _fractional_bits));
+            srcs.emplace_back(create_tensor<TensorType>(shape, data_type, 1));
             src_ptrs.emplace_back(&srcs.back());
         }
 
         TensorShape dst_shape = misc::shape_calculator::calculate_width_concatenate_shape(src_ptrs);
-        TensorType  dst       = create_tensor<TensorType>(dst_shape, data_type, 1, _fractional_bits);
+        TensorType  dst       = create_tensor<TensorType>(dst_shape, data_type, 1);
 
         // Create and configure function
         FunctionType width_concat;
@@ -141,7 +141,7 @@
         int i = 0;
         for(const auto &shape : shapes)
         {
-            srcs.emplace_back(shape, data_type, 1, _fractional_bits);
+            srcs.emplace_back(shape, data_type, 1);
             fill(srcs.back(), i++);
         }
 
@@ -150,9 +150,6 @@
 
     TensorType      _target{};
     SimpleTensor<T> _reference{};
-
-private:
-    int _fractional_bits{ 1 };
 };
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
index ac168eb..f1660e6 100644
--- a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
@@ -201,10 +201,10 @@
         }
 
         // Create tensors
-        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
-        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
-        TensorType bias    = create_tensor<TensorType>(bias_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
-        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
+        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
+        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, QuantizationInfo(), data_layout);
+        TensorType bias    = create_tensor<TensorType>(bias_shape, data_type, 1, QuantizationInfo(), data_layout);
+        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout);
 
         // Create and configure function
         FunctionType conv;
@@ -340,8 +340,8 @@
             permute(input_shape, PermutationVector(2U, 0U, 1U));
         }
 
-        TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
-        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo());
+        TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
+        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo());
 
         // Create and configure function
         FunctionType transf;
@@ -369,7 +369,7 @@
     SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
     {
         // Create reference
-        SimpleTensor<T> src{ input_shape, data_type, 1, 0, QuantizationInfo() };
+        SimpleTensor<T> src{ input_shape, data_type, 1, QuantizationInfo() };
 
         // Fill reference
         fill(src, 0, -1.f, 1.f);
@@ -424,8 +424,8 @@
         }
 
         // Create tensors
-        TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
-        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo());
+        TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
+        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo());
 
         // Create and configure function
         FunctionType filter_transform;
@@ -452,7 +452,7 @@
     SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
     {
         // Create reference
-        SimpleTensor<T> src{ input_shape, data_type, 1, 0, QuantizationInfo() };
+        SimpleTensor<T> src{ input_shape, data_type, 1, QuantizationInfo() };
 
         // Fill reference
         fill(src, 0, -1.f, 1.f);
@@ -502,7 +502,7 @@
 
         // Create tensors
         TensorType src = create_tensor<TensorType>(input_shape, data_type);
-        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo(), winograd_info.output_data_layout);
+        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), winograd_info.output_data_layout);
 
         // Create and configure function
         FunctionType output_transform;
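
In the Winograd hunks, the short call create_tensor<TensorType>(input_shape, data_type) and the fully explicit form are now the same overload with defaulted trailing parameters (assuming the signature sketched earlier):

    // Equivalent after this patch:
    TensorType a = create_tensor<TensorType>(shape, data_type);
    TensorType b = create_tensor<TensorType>(shape, data_type, 1, QuantizationInfo(), DataLayout::NCHW);
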
diff --git a/tests/validation/reference/AbsoluteDifference.cpp b/tests/validation/reference/AbsoluteDifference.cpp
index f518e67..f9fce5b 100644
--- a/tests/validation/reference/AbsoluteDifference.cpp
+++ b/tests/validation/reference/AbsoluteDifference.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,7 +24,6 @@
 #include "AbsoluteDifference.h"
 
 #include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 
 namespace arm_compute
diff --git a/tests/validation/reference/Accumulate.cpp b/tests/validation/reference/Accumulate.cpp
index 29a2007..7f34be9 100644
--- a/tests/validation/reference/Accumulate.cpp
+++ b/tests/validation/reference/Accumulate.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,7 +24,6 @@
 #include "Accumulate.h"
 
 #include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 
 namespace arm_compute
diff --git a/tests/validation/reference/ActivationLayer.cpp b/tests/validation/reference/ActivationLayer.cpp
index df7f653..9455eff 100644
--- a/tests/validation/reference/ActivationLayer.cpp
+++ b/tests/validation/reference/ActivationLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,7 +24,6 @@
 #include "ActivationLayer.h"
 
 #include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 
 namespace arm_compute
@@ -39,7 +38,7 @@
 SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo info)
 {
     // Create reference
-    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
 
     // Compute reference
     const T a(info.a());
@@ -92,68 +91,6 @@
     return dst;
 }
 
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
-SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo info)
-{
-    using namespace fixed_point_arithmetic;
-
-    // Create reference
-    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
-
-    // Compute reference
-    const int            fixed_point_position = src.fixed_point_position();
-    const fixed_point<T> a(info.a(), fixed_point_position);
-    const fixed_point<T> b(info.b(), fixed_point_position);
-    const fixed_point<T> const_0(0, fixed_point_position);
-    const fixed_point<T> const_1(1, fixed_point_position);
-
-    for(int i = 0; i < src.num_elements(); ++i)
-    {
-        fixed_point<T> x(src[i], fixed_point_position, true);
-
-        switch(info.activation())
-        {
-            case ActivationLayerInfo::ActivationFunction::ABS:
-                dst[i] = abs(x).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::LINEAR:
-                dst[i] = add(b, mul(a, x)).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::LOGISTIC:
-                dst[i] = (const_1 / (const_1 + exp(-x))).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::RELU:
-                dst[i] = max(const_0, x).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
-                dst[i] = min(a, max(const_0, x)).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
-                dst[i] = min(a, max(b, x)).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
-                dst[i] = (x > const_0) ? x.raw() : mul(a, x).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
-                dst[i] = log(const_1 + exp(x)).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::SQRT:
-                dst[i] = (const_1 / inv_sqrt(x)).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::SQUARE:
-                dst[i] = mul(x, x).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::TANH:
-                dst[i] = mul(a, tanh(mul(b, x))).raw();
-                break;
-            default:
-                ARM_COMPUTE_ERROR("Unsupported activation function");
-        }
-    }
-
-    return dst;
-}
-
 template <>
 SimpleTensor<uint8_t> activation_layer<uint8_t>(const SimpleTensor<uint8_t> &src, ActivationLayerInfo info)
 {
@@ -165,8 +102,6 @@
 
 template SimpleTensor<float> activation_layer(const SimpleTensor<float> &src, ActivationLayerInfo info);
 template SimpleTensor<half> activation_layer(const SimpleTensor<half> &src, ActivationLayerInfo info);
-template SimpleTensor<qint8_t> activation_layer(const SimpleTensor<qint8_t> &src, ActivationLayerInfo info);
-template SimpleTensor<qint16_t> activation_layer(const SimpleTensor<qint16_t> &src, ActivationLayerInfo info);
 } // namespace reference
 } // namespace validation
 } // namespace test
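The integral overload deleted above re-implemented every activation with the fixed-point abs/min/max/exp/log/inv_sqrt helpers; what survives is the plain float loop plus the uint8_t specialisation kept for QASYMM8. A simplified standalone sketch of the float reference for two of the activations, assuming the usual RELU and BOUNDED_RELU definitions:

#include <algorithm>
#include <cstddef>
#include <vector>

// Float activation reference (sketch): RELU is max(0, x),
// BOUNDED_RELU clamps the result to the layer's 'a' parameter.
std::vector<float> activation_ref(const std::vector<float> &src, float a, bool bounded_relu)
{
    std::vector<float> dst(src.size());
    for(std::size_t i = 0; i < src.size(); ++i)
    {
        const float relu = std::max(0.f, src[i]);
        dst[i]           = bounded_relu ? std::min(a, relu) : relu;
    }
    return dst;
}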
diff --git a/tests/validation/reference/ArithmeticAddition.cpp b/tests/validation/reference/ArithmeticAddition.cpp
index f26838d..4569277 100644
--- a/tests/validation/reference/ArithmeticAddition.cpp
+++ b/tests/validation/reference/ArithmeticAddition.cpp
@@ -24,7 +24,6 @@
 #include "ArithmeticAddition.h"
 
 #include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 
 namespace arm_compute
diff --git a/tests/validation/reference/ArithmeticDivision.cpp b/tests/validation/reference/ArithmeticDivision.cpp
index 934e890..0102231 100644
--- a/tests/validation/reference/ArithmeticDivision.cpp
+++ b/tests/validation/reference/ArithmeticDivision.cpp
@@ -24,7 +24,6 @@
 #include "ArithmeticDivision.h"
 
 #include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 
 namespace arm_compute
diff --git a/tests/validation/reference/BatchNormalizationLayer.cpp b/tests/validation/reference/BatchNormalizationLayer.cpp
index c8badac..3d1a6ed 100644
--- a/tests/validation/reference/BatchNormalizationLayer.cpp
+++ b/tests/validation/reference/BatchNormalizationLayer.cpp
@@ -36,56 +36,11 @@
 {
 namespace reference
 {
-// Batch Normalization Layer for fixed point type
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type *>
-SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
-                                          ActivationLayerInfo act_info, int fixed_point_position)
-{
-    ARM_COMPUTE_UNUSED(act_info);
-    SimpleTensor<T> result(src.shape(), src.data_type());
-
-    const auto cols       = static_cast<int>(src.shape()[0]);
-    const auto rows       = static_cast<int>(src.shape()[1]);
-    const auto depth      = static_cast<int>(src.shape()[2]);
-    const int  upper_dims = src.shape().total_size() / (cols * rows * depth);
-
-    for(int r = 0; r < upper_dims; ++r)
-    {
-        for(int i = 0; i < depth; ++i)
-        {
-            for(int k = 0; k < rows; ++k)
-            {
-                for(int l = 0; l < cols; ++l)
-                {
-                    const int pos = l + k * cols + i * rows * cols + r * cols * rows * depth;
-
-                    fixed_point_arithmetic::fixed_point<T> src_qs(src[pos], fixed_point_position, true);
-                    fixed_point_arithmetic::fixed_point<T> var_qs(var[i], fixed_point_position, true);
-                    fixed_point_arithmetic::fixed_point<T> mean_qs(mean[i], fixed_point_position, true);
-                    fixed_point_arithmetic::fixed_point<T> beta_qs(beta[i], fixed_point_position, true);
-                    fixed_point_arithmetic::fixed_point<T> gamma_qs(gamma[i], fixed_point_position, true);
-                    fixed_point_arithmetic::fixed_point<T> epsilon_qs(epsilon, fixed_point_position);
-
-                    auto denominator = fixed_point_arithmetic::inv_sqrt(var_qs + epsilon_qs);
-                    auto numerator   = src_qs - mean_qs;
-                    auto x_bar       = numerator * denominator;
-                    x_bar            = beta_qs + x_bar * gamma_qs;
-                    result[pos]      = x_bar.raw();
-                }
-            }
-        }
-    }
-
-    return result;
-}
-
 // Batch Normalization Layer for floating point type
 template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type *>
 SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
-                                          ActivationLayerInfo act_info, int fixed_point_position)
+                                          ActivationLayerInfo act_info)
 {
-    ARM_COMPUTE_UNUSED(fixed_point_position);
-
     SimpleTensor<T> result(src.shape(), src.data_type());
 
     const auto cols       = static_cast<int>(src.shape()[0]);
@@ -119,14 +74,10 @@
     return result;
 }
 template SimpleTensor<float> batch_normalization_layer(const SimpleTensor<float> &src, const SimpleTensor<float> &mean, const SimpleTensor<float> &var, const SimpleTensor<float> &beta,
-                                                       const SimpleTensor<float> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
-template SimpleTensor<int8_t> batch_normalization_layer(const SimpleTensor<int8_t> &src, const SimpleTensor<int8_t> &mean, const SimpleTensor<int8_t> &var, const SimpleTensor<int8_t> &beta,
-                                                        const SimpleTensor<int8_t> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
-template SimpleTensor<int16_t> batch_normalization_layer(const SimpleTensor<int16_t> &src, const SimpleTensor<int16_t> &mean, const SimpleTensor<int16_t> &var, const SimpleTensor<int16_t> &beta,
-                                                         const SimpleTensor<int16_t> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
+                                                       const SimpleTensor<float> &gamma, float epsilon, ActivationLayerInfo act_info);
 template SimpleTensor<half> batch_normalization_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &mean, const SimpleTensor<half> &var,
                                                       const SimpleTensor<half> &beta,
-                                                      const SimpleTensor<half> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
+                                                      const SimpleTensor<half> &gamma, float epsilon, ActivationLayerInfo act_info);
 
 } // namespace reference
 } // namespace validation
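The surviving float path is the textbook normalisation, x_hat[c] = gamma[c] * (x - mean[c]) / sqrt(var[c] + epsilon) + beta[c] per channel, which the deleted loop emulated with fixed-point inv_sqrt. Minimal standalone sketch (NCHW layout, the optional fused activation omitted):

#include <cmath>
#include <cstddef>
#include <vector>

// Per-channel batch normalization over a flat C x (H*W) buffer (sketch).
void batch_norm_ref(std::vector<float> &x, const std::vector<float> &mean,
                    const std::vector<float> &var, const std::vector<float> &beta,
                    const std::vector<float> &gamma, float epsilon, std::size_t hw)
{
    for(std::size_t c = 0; c < mean.size(); ++c)
    {
        const float inv_stddev = 1.f / std::sqrt(var[c] + epsilon);
        for(std::size_t i = 0; i < hw; ++i)
        {
            float &v = x[c * hw + i];
            v        = gamma[c] * (v - mean[c]) * inv_stddev + beta[c];
        }
    }
}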
diff --git a/tests/validation/reference/BatchNormalizationLayer.h b/tests/validation/reference/BatchNormalizationLayer.h
index 329909d..b45d820 100644
--- a/tests/validation/reference/BatchNormalizationLayer.h
+++ b/tests/validation/reference/BatchNormalizationLayer.h
@@ -37,13 +37,11 @@
 {
 template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
 SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
-                                          ActivationLayerInfo act_info,
-                                          int                 fixed_point_position);
+                                          ActivationLayerInfo act_info);
 
 template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
 SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
-                                          ActivationLayerInfo act_info,
-                                          int                 fixed_point_position);
+                                          ActivationLayerInfo act_info);
 } // namespace reference
 } // namespace validation
 } // namespace test
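Note that the std::is_integral declaration keeps its (trimmed) slot in this header even though its only definition, the fixed-point one, was deleted from the .cpp above; presumably the dangling declaration is left for a follow-up cleanup.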
diff --git a/tests/validation/reference/ChannelShuffle.cpp b/tests/validation/reference/ChannelShuffle.cpp
index c4d8d50..b8aa920 100644
--- a/tests/validation/reference/ChannelShuffle.cpp
+++ b/tests/validation/reference/ChannelShuffle.cpp
@@ -39,7 +39,7 @@
 SimpleTensor<T> channel_shuffle(const SimpleTensor<T> &src, int num_groups)
 {
     // Create reference
-    SimpleTensor<T> dst{ src.shape(), src.data_type(), src.num_channels(), src.fixed_point_position(), src.quantization_info() };
+    SimpleTensor<T> dst{ src.shape(), src.data_type(), src.num_channels(), src.quantization_info() };
 
     const int M                 = src.shape()[0];
     const int N                 = src.shape()[1];
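Only the dst constructor changes in this file; for context, the reference implements the usual channel shuffle: with g groups and K = C / g channels per group, source channel c lands on destination channel (c % K) * g + c / K. Standalone sketch under that assumption (the mapping direction is illustrative, not lifted from the file):

#include <cstddef>
#include <vector>

// Channel shuffle on a flat C x (H*W) buffer: reshape C to (g, K),
// transpose to (K, g), flatten (sketch).
std::vector<float> channel_shuffle_ref(const std::vector<float> &src, std::size_t C,
                                       std::size_t hw, std::size_t g)
{
    const std::size_t K = C / g;
    std::vector<float> dst(src.size());
    for(std::size_t c = 0; c < C; ++c)
    {
        const std::size_t c_out = (c % K) * g + c / K;
        for(std::size_t i = 0; i < hw; ++i)
        {
            dst[c_out * hw + i] = src[c * hw + i];
        }
    }
    return dst;
}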
diff --git a/tests/validation/reference/ConvolutionLayer.cpp b/tests/validation/reference/ConvolutionLayer.cpp
index fe558ba..00c839d 100644
--- a/tests/validation/reference/ConvolutionLayer.cpp
+++ b/tests/validation/reference/ConvolutionLayer.cpp
@@ -108,7 +108,7 @@
                                   const Size2D &dilation)
 {
     // Create reference
-    SimpleTensor<T> dst{ output_shape, src.data_type(), 1, src.fixed_point_position(), src.quantization_info() };
+    SimpleTensor<T> dst{ output_shape, src.data_type(), 1, src.quantization_info() };
 
     if(src.data_layout() == DataLayout::NHWC)
     {
@@ -128,10 +128,6 @@
                                                const PadStrideInfo &info, const Size2D &dilation);
 template SimpleTensor<half> convolution_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &weights, const SimpleTensor<half> &bias, const TensorShape &output_shape,
                                               const PadStrideInfo &info, const Size2D &dilation);
-template SimpleTensor<qint8_t> convolution_layer(const SimpleTensor<qint8_t> &src, const SimpleTensor<qint8_t> &weights, const SimpleTensor<qint8_t> &bias, const TensorShape &output_shape,
-                                                 const PadStrideInfo &info, const Size2D &dilation);
-template SimpleTensor<qint16_t> convolution_layer(const SimpleTensor<qint16_t> &src, const SimpleTensor<qint16_t> &weights, const SimpleTensor<qint16_t> &bias, const TensorShape &output_shape,
-                                                  const PadStrideInfo &info, const Size2D &dilation);
 template SimpleTensor<uint8_t> convolution_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &output_shape,
                                                  const PadStrideInfo &info, const Size2D &dilation);
 } // namespace reference
diff --git a/tests/validation/reference/DeconvolutionLayer.cpp b/tests/validation/reference/DeconvolutionLayer.cpp
index 3543708..d073bbf 100644
--- a/tests/validation/reference/DeconvolutionLayer.cpp
+++ b/tests/validation/reference/DeconvolutionLayer.cpp
@@ -46,7 +46,7 @@
     int         out_y        = src.shape().y() + (src.shape().y() - 1) * (stride_y - 1) + a.second + 2 * info.pad().second;
     scaled_shape.set(0, out_x);
     scaled_shape.set(1, out_y);
-    SimpleTensor<T> scaled{ scaled_shape, src.data_type(), 1, src.fixed_point_position() };
+    SimpleTensor<T> scaled{ scaled_shape, src.data_type(), 1 };
 
     const int width_in      = src.shape().x();
     const int height_in     = src.shape().y();
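The untouched context above also documents the deconvolution upscaling rule: each spatial dimension grows to in + (in - 1) * (stride - 1) + a + 2 * pad before a unit-stride convolution runs over the zero-stuffed tensor; for instance in = 4, stride = 2, a = 0, pad = 1 gives 4 + 3 + 0 + 2 = 9. As a one-line helper (sketch):

// Upscaled extent used for the intermediate 'scaled' tensor; 'a' is the
// user-specified inner border (same formula as the context above).
int deconv_scaled_dim(int in, int stride, int a, int pad)
{
    return in + (in - 1) * (stride - 1) + a + 2 * pad;
}
// e.g. deconv_scaled_dim(4, 2, 0, 1) == 9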
diff --git a/tests/validation/reference/DepthConcatenateLayer.cpp b/tests/validation/reference/DepthConcatenateLayer.cpp
index 9a72484..c9a2352 100644
--- a/tests/validation/reference/DepthConcatenateLayer.cpp
+++ b/tests/validation/reference/DepthConcatenateLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -95,8 +95,6 @@
 
 template SimpleTensor<float> depthconcatenate_layer(const std::vector<SimpleTensor<float>> &srcs);
 template SimpleTensor<half> depthconcatenate_layer(const std::vector<SimpleTensor<half>> &srcs);
-template SimpleTensor<qint8_t> depthconcatenate_layer(const std::vector<SimpleTensor<qint8_t>> &srcs);
-template SimpleTensor<qint16_t> depthconcatenate_layer(const std::vector<SimpleTensor<qint16_t>> &srcs);
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/DepthConvertLayer.cpp b/tests/validation/reference/DepthConvertLayer.cpp
index dd095b8..0220077 100644
--- a/tests/validation/reference/DepthConvertLayer.cpp
+++ b/tests/validation/reference/DepthConvertLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -36,44 +36,6 @@
 {
 namespace reference
 {
-template < typename T1, typename T2, typename std::enable_if < std::is_integral<T1>::value &&std::is_floating_point<T2>::value, int >::type >
-SimpleTensor<T2> depth_convert(const SimpleTensor<T1> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift)
-{
-    ARM_COMPUTE_UNUSED(policy);
-    ARM_COMPUTE_UNUSED(shift);
-
-    using namespace fixed_point_arithmetic;
-    SimpleTensor<T2> result(src.shape(), dt_out);
-
-    const int fixed_point_position = src.fixed_point_position();
-
-    for(int i = 0; i < src.num_elements(); ++i)
-    {
-        result[i] = static_cast<float>(fixed_point<T1>(src[i], fixed_point_position, true));
-    }
-
-    return result;
-}
-
-template < typename T1, typename T2, typename std::enable_if < std::is_floating_point<T1>::value &&std::is_integral<T2>::value, int >::type >
-SimpleTensor<T2> depth_convert(const SimpleTensor<T1> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift)
-{
-    ARM_COMPUTE_UNUSED(policy);
-    ARM_COMPUTE_UNUSED(shift);
-
-    using namespace fixed_point_arithmetic;
-    SimpleTensor<T2> result(src.shape(), dt_out, 1, src.fixed_point_position());
-
-    const int fixed_point_position = result.fixed_point_position();
-
-    for(int i = 0; i < src.num_elements(); ++i)
-    {
-        result[i] = fixed_point<T2>(src[i], fixed_point_position).raw();
-    }
-
-    return result;
-}
-
 template < typename T1, typename T2, typename std::enable_if < std::is_integral<T1>::value &&std::is_integral<T2>::value &&!std::is_same<T1, T2>::value, int >::type >
 SimpleTensor<T2> depth_convert(const SimpleTensor<T1> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift)
 {
@@ -126,20 +88,6 @@
     return result;
 }
 
-template < typename T1, typename T2, typename std::enable_if < std::is_floating_point<T1>::value &&is_floating_point<T2>::value, int >::type >
-SimpleTensor<T2> depth_convert(const SimpleTensor<T1> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift)
-{
-    ARM_COMPUTE_UNUSED(policy);
-    ARM_COMPUTE_UNUSED(shift);
-
-    SimpleTensor<T2> result(src.shape(), dt_out);
-
-    for(int i = 0; i < src.num_elements(); ++i)
-    {
-        result[i] = static_cast<T2>(src[i]);
-    }
-}
-
 template SimpleTensor<uint16_t> depth_convert(const SimpleTensor<uint8_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
 template SimpleTensor<int16_t> depth_convert(const SimpleTensor<uint8_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
 template SimpleTensor<int32_t> depth_convert(const SimpleTensor<uint8_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
@@ -147,10 +95,6 @@
 template SimpleTensor<uint32_t> depth_convert(const SimpleTensor<uint16_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
 template SimpleTensor<uint8_t> depth_convert(const SimpleTensor<int16_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
 template SimpleTensor<int32_t> depth_convert(const SimpleTensor<int16_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
-template SimpleTensor<float> depth_convert(const SimpleTensor<int8_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
-template SimpleTensor<float> depth_convert(const SimpleTensor<int16_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
-template SimpleTensor<int8_t> depth_convert(const SimpleTensor<float> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
-template SimpleTensor<int16_t> depth_convert(const SimpleTensor<float> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
 } // namespace reference
 } // namespace validation
 } // namespace test
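Both fixed-point converters (to and from float) go with the types, and so do their instantiations. Incidentally, the float-to-float overload deleted in the last hunk falls off the end without returning result; no float-to-float instantiation existed, so the template was never stamped out and the missing return went unnoticed. The surviving integral path widens, applies the left shift, then saturates or wraps per the ConvertPolicy; simplified standalone sketch (uint8_t to int16_t, SATURATE semantics assumed):

#include <algorithm>
#include <cstdint>
#include <limits>

// Widen, shift, then clamp into the destination range when saturating.
int16_t depth_convert_u8_to_s16(uint8_t v, uint32_t shift, bool saturate)
{
    const int32_t wide = static_cast<int32_t>(v) << shift;
    if(saturate)
    {
        const int32_t lo = std::numeric_limits<int16_t>::min();
        const int32_t hi = std::numeric_limits<int16_t>::max();
        return static_cast<int16_t>(std::min(hi, std::max(lo, wide)));
    }
    return static_cast<int16_t>(wide);
}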
diff --git a/tests/validation/reference/DepthwiseConvolutionLayer.cpp b/tests/validation/reference/DepthwiseConvolutionLayer.cpp
index 10c617e..d8f3cba 100644
--- a/tests/validation/reference/DepthwiseConvolutionLayer.cpp
+++ b/tests/validation/reference/DepthwiseConvolutionLayer.cpp
@@ -53,7 +53,7 @@
 SimpleTensor<T> depthwise_convolution(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &biases, const TensorShape &dst_shape, const PadStrideInfo &conv_info,
                                       unsigned int depth_multiplier)
 {
-    SimpleTensor<T> dst{ dst_shape, src.data_type(), 1, src.fixed_point_position() };
+    SimpleTensor<T> dst{ dst_shape, src.data_type(), 1 };
 
     // Compute reference
     const int filter_width  = weights.shape().x();
@@ -122,7 +122,7 @@
 SimpleTensor<uint8_t> depthwise_convolution(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &biases, const TensorShape &dst_shape,
                                             const PadStrideInfo &conv_info, unsigned int depth_multiplier)
 {
-    SimpleTensor<uint8_t> dst{ dst_shape, src.data_type(), 1, src.fixed_point_position(), src.quantization_info() };
+    SimpleTensor<uint8_t> dst{ dst_shape, src.data_type(), 1, src.quantization_info() };
 
     // Create reference
     const int   input_offset   = -src.quantization_info().offset;
diff --git a/tests/validation/reference/FlattenLayer.cpp b/tests/validation/reference/FlattenLayer.cpp
index 44f4d93..e140d75 100644
--- a/tests/validation/reference/FlattenLayer.cpp
+++ b/tests/validation/reference/FlattenLayer.cpp
@@ -36,7 +36,7 @@
 template <typename T>
 SimpleTensor<T> flatten_layer(const SimpleTensor<T> &src, const TensorShape &shape_flatten)
 {
-    SimpleTensor<T> dst(shape_flatten, src.data_type(), 1, src.fixed_point_position());
+    SimpleTensor<T> dst(shape_flatten, src.data_type(), 1);
 
     // Note: Since the reference implementation does not use padding bytes, we can copy directly the content of the source tensor
     std::copy(src.data(), src.data() + src.num_elements(), dst.data());
@@ -46,8 +46,6 @@
 
 template SimpleTensor<float> flatten_layer(const SimpleTensor<float> &src, const TensorShape &shape_flatten);
 template SimpleTensor<half> flatten_layer(const SimpleTensor<half> &src, const TensorShape &shape_flatten);
-template SimpleTensor<qint8_t> flatten_layer(const SimpleTensor<qint8_t> &src, const TensorShape &shape_flatten);
-template SimpleTensor<qint16_t> flatten_layer(const SimpleTensor<qint16_t> &src, const TensorShape &shape_flatten);
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/FullyConnectedLayer.cpp b/tests/validation/reference/FullyConnectedLayer.cpp
index 5384715..3ef10ea 100644
--- a/tests/validation/reference/FullyConnectedLayer.cpp
+++ b/tests/validation/reference/FullyConnectedLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -44,10 +44,8 @@
 // Vector matrix multiply for floating point
 template < typename T, typename TB, typename std::enable_if < is_floating_point<T>::value &&is_floating_point<TB>::value, int >::type = 0 >
 void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &dst, int offset_src, int offset_dst, int cols_weights,
-                            int rows_weights, uint8_t fixed_point_position)
+                            int rows_weights)
 {
-    ARM_COMPUTE_UNUSED(fixed_point_position);
-
     const T *src_ptr     = src.data() + offset_src;
     const T *weights_ptr = weights.data();
     const TB *bias_ptr    = bias.data();
@@ -60,57 +58,16 @@
     }
 }
 
-// Vector matrix multiply for fixed point type
-template < typename T, typename TB, typename std::enable_if < std::is_integral<T>::value &&std::is_integral<TB>::value, int >::type = 0 >
-void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &dst, int offset_src, int offset_dst, int cols_weights,
-                            int rows_weights, uint8_t fixed_point_position)
+// Vector matrix multiply for quantized type
+template < typename T, typename TB, typename std::enable_if < std::is_same<T, uint8_t>::value &&std::is_same<TB, int32_t>::value, int >::type = 0 >
+void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &dst, int offset_src, int offset_dst,
+                            int cols_weights, int rows_weights)
 {
     const T *src_ptr     = src.data() + offset_src;
     const T *weights_ptr = weights.data();
     const TB *bias_ptr    = bias.data();
     T        *dst_ptr     = dst.data() + offset_dst;
 
-    using namespace fixed_point_arithmetic;
-    using promoted_type = fixed_point_arithmetic::traits::promote_t<T>;
-
-    for(int y = 0; y < rows_weights; ++y)
-    {
-        // Reset accumulator
-        fixed_point<promoted_type> acc(0, fixed_point_position);
-
-        for(int x = 0; x < cols_weights; ++x)
-        {
-            const fixed_point<promoted_type> i_value(src_ptr[x], fixed_point_position, true);
-            const fixed_point<promoted_type> w_value(weights_ptr[x], fixed_point_position, true);
-            acc = acc + i_value * w_value;
-        }
-
-        // Get the bias
-        const fixed_point<T> b(bias_ptr[y], fixed_point_position, true);
-
-        // Convert back and accumulate the bias
-        fixed_point<T> res(acc);
-        res = res + b;
-
-        // Store the result
-        dst_ptr[y] = res.raw();
-
-        weights_ptr += cols_weights;
-    }
-}
-
-// Vector matrix multiply for quantized type
-template <>
-void vector_matrix_multiply(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, SimpleTensor<uint8_t> &dst, int offset_src, int offset_dst,
-                            int cols_weights, int rows_weights, uint8_t fixed_point_position)
-{
-    ARM_COMPUTE_UNUSED(fixed_point_position);
-
-    const uint8_t *src_ptr     = src.data() + offset_src;
-    const uint8_t *weights_ptr = weights.data();
-    const int32_t *bias_ptr    = bias.data();
-    uint8_t       *dst_ptr     = dst.data() + offset_dst;
-
     const int   input_offset   = -src.quantization_info().offset;
     const float input_scale    = src.quantization_info().scale;
     const int   weights_offset = -weights.quantization_info().offset;
@@ -141,7 +98,7 @@
         acc = utility::clamp<int32_t>(acc, 0, 255);
 
         // Store the result
-        dst_ptr[y] = static_cast<uint8_t>(acc);
+        dst_ptr[y] = static_cast<T>(acc);
 
         weights_ptr += cols_weights;
     }
@@ -152,7 +109,7 @@
 SimpleTensor<T> fully_connected_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, const TensorShape &dst_shape)
 {
     // Create reference
-    SimpleTensor<T> dst{ TensorShape{ dst_shape }, src.data_type(), 1, src.fixed_point_position(), src.quantization_info() };
+    SimpleTensor<T> dst{ TensorShape{ dst_shape }, src.data_type(), 1, src.quantization_info() };
 
     // Sanity checks
     const int          num_batch_dimensions = std::max(0, static_cast<int>(dst_shape.num_dimensions()) - 1);
@@ -183,8 +140,7 @@
                                   offset_in,
                                   offset_out,
                                   cols_weights,
-                                  rows_weights,
-                                  src.fixed_point_position());
+                                  rows_weights);
     }
 
     return dst;
@@ -192,8 +148,6 @@
 
 template SimpleTensor<float> fully_connected_layer(const SimpleTensor<float> &src, const SimpleTensor<float> &weights, const SimpleTensor<float> &bias, const TensorShape &dst_shape);
 template SimpleTensor<half> fully_connected_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &weights, const SimpleTensor<half> &bias, const TensorShape &dst_shape);
-template SimpleTensor<qint8_t> fully_connected_layer(const SimpleTensor<qint8_t> &src, const SimpleTensor<qint8_t> &weights, const SimpleTensor<qint8_t> &bias, const TensorShape &dst_shape);
-template SimpleTensor<qint16_t> fully_connected_layer(const SimpleTensor<qint16_t> &src, const SimpleTensor<qint16_t> &weights, const SimpleTensor<qint16_t> &bias, const TensorShape &dst_shape);
 template SimpleTensor<uint8_t> fully_connected_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &dst_shape);
 } // namespace reference
 } // namespace validation
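The explicit uint8_t specialisation becomes a constrained template, but the arithmetic is unchanged: accumulate in int32_t with the asymmetric offsets applied to both operands, add the int32_t bias, requantize, and clamp to [0, 255]. Standalone sketch with a single output multiplier folded together (multiplier, in_off, w_off and out_off are illustrative names, not the library's):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// One quantized row product per output: acc = bias + sum((s + in_off) * (w + w_off)),
// then requantize and clamp into uint8 range (sketch).
std::vector<uint8_t> vector_matrix_multiply_q8(const std::vector<uint8_t> &src,
                                               const std::vector<uint8_t> &weights,
                                               const std::vector<int32_t> &bias,
                                               int rows, int cols,
                                               int in_off, int w_off, int out_off,
                                               float multiplier) // in_scale * w_scale / out_scale
{
    std::vector<uint8_t> dst(rows);
    for(int y = 0; y < rows; ++y)
    {
        int32_t acc = bias[y];
        for(int x = 0; x < cols; ++x)
        {
            acc += (src[x] + in_off) * (weights[y * cols + x] + w_off);
        }
        acc    = static_cast<int32_t>(std::lround(acc * multiplier)) + out_off;
        dst[y] = static_cast<uint8_t>(std::min(255, std::max(0, acc)));
    }
    return dst;
}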
diff --git a/tests/validation/reference/GEMM.cpp b/tests/validation/reference/GEMM.cpp
index f9dcfcb..7378ada 100644
--- a/tests/validation/reference/GEMM.cpp
+++ b/tests/validation/reference/GEMM.cpp
@@ -38,7 +38,7 @@
 SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
 {
     // Create reference
-    SimpleTensor<T> dst{ c.shape(), c.data_type(), 1, c.fixed_point_position() };
+    SimpleTensor<T> dst{ c.shape(), c.data_type(), 1 };
 
     // Compute reference
     const int M = a.shape().y();
@@ -91,7 +91,7 @@
     using namespace fixed_point_arithmetic;
 
     // Create reference
-    SimpleTensor<T> dst{ c.shape(), c.data_type(), 1, c.fixed_point_position() };
+    SimpleTensor<T> dst{ c.shape(), c.data_type(), 1 };
 
     // Compute reference
     using promoted_type = fixed_point_arithmetic::traits::promote_t<T>;
@@ -156,8 +156,6 @@
 
 template SimpleTensor<float> gemm(const SimpleTensor<float> &a, const SimpleTensor<float> &b, const SimpleTensor<float> &c, float alpha, float beta);
 template SimpleTensor<half> gemm(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
-template SimpleTensor<qint8_t> gemm(const SimpleTensor<qint8_t> &a, const SimpleTensor<qint8_t> &b, const SimpleTensor<qint8_t> &c, float alpha, float beta);
-template SimpleTensor<qint16_t> gemm(const SimpleTensor<qint16_t> &a, const SimpleTensor<qint16_t> &b, const SimpleTensor<qint16_t> &c, float alpha, float beta);
 } // namespace reference
 } // namespace validation
 } // namespace test
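Only the QS8/QS16 instantiations are removed here; the fixed-point gemm body itself (note the using namespace fixed_point_arithmetic context above) is left in place, presumably for a separate cleanup. The float template that remains is the plain dst = alpha * A * B + beta * C triple loop; minimal sketch:

#include <cstddef>
#include <vector>

// Reference GEMM (sketch): a is M x K, b is K x N, c and dst are M x N.
std::vector<float> gemm_ref(const std::vector<float> &a, const std::vector<float> &b,
                            const std::vector<float> &c, std::size_t M, std::size_t N,
                            std::size_t K, float alpha, float beta)
{
    std::vector<float> dst(M * N);
    for(std::size_t row = 0; row < M; ++row)
    {
        for(std::size_t col = 0; col < N; ++col)
        {
            float acc = 0.f;
            for(std::size_t k = 0; k < K; ++k)
            {
                acc += a[row * K + k] * b[k * N + col];
            }
            dst[row * N + col] = alpha * acc + beta * c[row * N + col];
        }
    }
    return dst;
}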
diff --git a/tests/validation/reference/LocallyConnected.cpp b/tests/validation/reference/LocallyConnected.cpp
index 08e3f02..ecc582b 100644
--- a/tests/validation/reference/LocallyConnected.cpp
+++ b/tests/validation/reference/LocallyConnected.cpp
@@ -41,7 +41,7 @@
 SimpleTensor<T> locally_connected(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, const TensorShape &output_shape, const PadStrideInfo &info)
 {
     // Create reference
-    SimpleTensor<T> dst{ output_shape, src.data_type(), 1, src.fixed_point_position(), src.quantization_info() };
+    SimpleTensor<T> dst{ output_shape, src.data_type(), 1, src.quantization_info() };
 
     // Compute reference
     const int width_in  = src.shape().x();
diff --git a/tests/validation/reference/NormalizationLayer.cpp b/tests/validation/reference/NormalizationLayer.cpp
index 226af96..85872c8 100644
--- a/tests/validation/reference/NormalizationLayer.cpp
+++ b/tests/validation/reference/NormalizationLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -38,7 +38,7 @@
 SimpleTensor<T> normalization_layer(const SimpleTensor<T> &src, NormalizationLayerInfo info)
 {
     // Create reference
-    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
 
     // Compute reference
     const uint32_t norm_size = info.norm_size();
@@ -152,7 +152,7 @@
     using namespace fixed_point_arithmetic;
 
     // Create reference
-    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
 
     // Compute reference
     const int fixed_point_position = src.fixed_point_position();
@@ -267,8 +267,6 @@
 
 template SimpleTensor<float> normalization_layer(const SimpleTensor<float> &src, NormalizationLayerInfo info);
 template SimpleTensor<half> normalization_layer(const SimpleTensor<half> &src, NormalizationLayerInfo info);
-template SimpleTensor<qint8_t> normalization_layer(const SimpleTensor<qint8_t> &src, NormalizationLayerInfo info);
-template SimpleTensor<qint16_t> normalization_layer(const SimpleTensor<qint16_t> &src, NormalizationLayerInfo info);
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/Permute.cpp b/tests/validation/reference/Permute.cpp
index bbb2e8d..29c3c5c 100644
--- a/tests/validation/reference/Permute.cpp
+++ b/tests/validation/reference/Permute.cpp
@@ -42,7 +42,7 @@
     permute(dst_shape, perm);
 
     // Create reference
-    SimpleTensor<T> dst{ dst_shape, src.data_type(), src.num_channels(), src.fixed_point_position(), src.quantization_info() };
+    SimpleTensor<T> dst{ dst_shape, src.data_type(), src.num_channels(), src.quantization_info() };
 
     // Compute reference
     for(int i = 0; i < src.num_elements(); ++i)
diff --git a/tests/validation/reference/PoolingLayer.cpp b/tests/validation/reference/PoolingLayer.cpp
index 6973454..e9054b9 100644
--- a/tests/validation/reference/PoolingLayer.cpp
+++ b/tests/validation/reference/PoolingLayer.cpp
@@ -44,7 +44,7 @@
     ARM_COMPUTE_ERROR_ON(info.is_global_pooling() && (src.shape().x() != src.shape().y()));
 
     // Create reference
-    SimpleTensor<T> dst{ compute_pool_shape(TensorInfo(src.shape(), 1, src.data_type(), src.fixed_point_position()), info), src.data_type(), 1, src.fixed_point_position() };
+    SimpleTensor<T> dst{ compute_pool_shape(TensorInfo(src.shape(), 1, src.data_type(), src.fixed_point_position()), info), src.data_type(), 1 };
 
     const int   pool_size_x     = info.is_global_pooling() ? src.shape().x() : info.pool_size().width;
     const int   pool_size_y     = info.is_global_pooling() ? src.shape().y() : info.pool_size().height;
@@ -152,128 +152,6 @@
     return dst;
 }
 
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
-SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, const PoolingLayerInfo &info)
-{
-    ARM_COMPUTE_ERROR_ON(info.is_global_pooling() && (src.shape().x() != src.shape().y()));
-
-    const auto w_src      = static_cast<int>(src.shape()[0]);
-    const auto h_src      = static_cast<int>(src.shape()[1]);
-    const int  upper_dims = src.shape().total_size() / (w_src * h_src);
-
-    const int   pool_size_x     = info.is_global_pooling() ? src.shape().x() : info.pool_size().width;
-    const int   pool_size_y     = info.is_global_pooling() ? src.shape().y() : info.pool_size().height;
-    PoolingType type            = info.pool_type();
-    int         pool_stride_x   = info.pad_stride_info().stride().first;
-    int         pool_stride_y   = info.pad_stride_info().stride().second;
-    int         pad_left        = info.pad_stride_info().pad_left();
-    int         pad_top         = info.pad_stride_info().pad_top();
-    int         pad_right       = info.pad_stride_info().pad_right();
-    int         pad_bottom      = info.pad_stride_info().pad_bottom();
-    bool        exclude_padding = info.exclude_padding();
-
-    // Create reference
-    SimpleTensor<T> dst{ compute_pool_shape(TensorInfo(src.shape(), 1, src.data_type(), src.fixed_point_position()), info), src.data_type(), 1, src.fixed_point_position() };
-
-    const auto w_dst = static_cast<int>(dst.shape()[0]);
-    const auto h_dst = static_cast<int>(dst.shape()[1]);
-
-    if(type == PoolingType::MAX)
-    {
-        for(int r = 0; r < upper_dims; ++r)
-        {
-            for(int h = 0; h < h_dst; ++h)
-            {
-                for(int w = 0; w < w_dst; ++w)
-                {
-                    int wstart = w * pool_stride_x - pad_left;
-                    int hstart = h * pool_stride_y - pad_top;
-                    int wend   = std::min(wstart + pool_size_x, w_src);
-                    int hend   = std::min(hstart + pool_size_y, h_src);
-                    wstart     = std::max(wstart, 0);
-                    hstart     = std::max(hstart, 0);
-
-                    T max_val = std::numeric_limits<T>::lowest();
-                    for(int y = hstart; y < hend; ++y)
-                    {
-                        for(int x = wstart; x < wend; ++x)
-                        {
-                            const T val = src[r * h_src * w_src + y * w_src + x];
-                            if(val > max_val)
-                            {
-                                max_val = val;
-                            }
-                        }
-                    }
-
-                    dst[r * h_dst * w_dst + h * w_dst + w] = max_val;
-                }
-            }
-        }
-    }
-    else // Average or l2 pooling
-    {
-        for(int r = 0; r < upper_dims; ++r)
-        {
-            for(int h = 0; h < h_dst; ++h)
-            {
-                for(int w = 0; w < w_dst; ++w)
-                {
-                    int wstart = w * pool_stride_x - pad_left;
-                    int hstart = h * pool_stride_y - pad_top;
-                    int wend   = std::min(wstart + pool_size_x, w_src + pad_right);
-                    int hend   = std::min(hstart + pool_size_y, h_src + pad_bottom);
-                    int pool   = (hend - hstart) * (wend - wstart);
-                    wstart     = std::max(wstart, 0);
-                    hstart     = std::max(hstart, 0);
-                    wend       = std::min(wend, w_src);
-                    hend       = std::min(hend, h_src);
-                    // Exclude padding pixels from the average
-                    if(exclude_padding)
-                    {
-                        pool = (hend - hstart) * (wend - wstart);
-                    }
-
-                    using namespace fixed_point_arithmetic;
-
-                    const int            fixed_point_position = src.fixed_point_position();
-                    const fixed_point<T> const_1(1, fixed_point_position);
-                    const fixed_point<T> invpool_fp(1.f / static_cast<float>(pool), fixed_point_position);
-                    fixed_point<T>       avg_val(0, fixed_point_position, true);
-
-                    if(type == PoolingType::AVG)
-                    {
-                        for(int y = hstart; y < hend; ++y)
-                        {
-                            for(int x = wstart; x < wend; ++x)
-                            {
-                                const fixed_point<T> in_fp(src[r * h_src * w_src + y * w_src + x], fixed_point_position, true);
-                                avg_val = add(avg_val, in_fp);
-                            }
-                        }
-                        dst[r * h_dst * w_dst + h * w_dst + w] = mul(avg_val, invpool_fp).raw();
-                    }
-                    else
-                    {
-                        for(int y = hstart; y < hend; ++y)
-                        {
-                            for(int x = wstart; x < wend; ++x)
-                            {
-                                const fixed_point<T> in_fp(src[r * h_src * w_src + y * w_src + x], fixed_point_position, true);
-                                avg_val = add(avg_val, mul(in_fp, in_fp));
-                            }
-                        }
-                        auto res                               = div(const_1, (inv_sqrt(mul(avg_val, invpool_fp))));
-                        dst[r * h_dst * w_dst + h * w_dst + w] = res.raw();
-                    }
-                }
-            }
-        }
-    }
-
-    return dst;
-}
-
 template <>
 SimpleTensor<uint8_t> pooling_layer<uint8_t>(const SimpleTensor<uint8_t> &src, const PoolingLayerInfo &info)
 {
@@ -285,8 +163,6 @@
 
 template SimpleTensor<float> pooling_layer(const SimpleTensor<float> &src, const PoolingLayerInfo &info);
 template SimpleTensor<half> pooling_layer(const SimpleTensor<half> &src, const PoolingLayerInfo &info);
-template SimpleTensor<qint8_t> pooling_layer(const SimpleTensor<qint8_t> &src, const PoolingLayerInfo &info);
-template SimpleTensor<qint16_t> pooling_layer(const SimpleTensor<qint16_t> &src, const PoolingLayerInfo &info);
 } // namespace reference
 } // namespace validation
 } // namespace test
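Two details in this file: the deleted integral overload was a line-for-line fixed-point mirror of the float path, and the surviving dst line still passes src.fixed_point_position() into the TensorInfo used for shape computation, presumably until the core constructor drops that argument. The average-pooling divisor logic the float path shares with the deleted hunk is the subtle part; standalone sketch:

#include <algorithm>

// Divisor for AVG/L2 pooling: the window clamped to the padded frame,
// unless exclude_padding recomputes it from the clamp to the real frame.
int avg_pool_divisor(int wstart, int hstart, int pool_x, int pool_y,
                     int w_src, int h_src, int pad_right, int pad_bottom,
                     bool exclude_padding)
{
    const int wend = std::min(wstart + pool_x, w_src + pad_right);
    const int hend = std::min(hstart + pool_y, h_src + pad_bottom);
    int       pool = (hend - hstart) * (wend - wstart);
    if(exclude_padding)
    {
        pool = (std::min(hend, h_src) - std::max(hstart, 0)) * (std::min(wend, w_src) - std::max(wstart, 0));
    }
    return pool;
}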
diff --git a/tests/validation/reference/SoftmaxLayer.cpp b/tests/validation/reference/SoftmaxLayer.cpp
index 90b9b1f..ae4bcd8 100644
--- a/tests/validation/reference/SoftmaxLayer.cpp
+++ b/tests/validation/reference/SoftmaxLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -38,7 +38,7 @@
 SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta)
 {
     // Create reference
-    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
 
     // Compute reference
     const int cols       = src.shape()[0];
@@ -79,7 +79,7 @@
     using namespace fixed_point_arithmetic;
 
     // Create reference
-    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
 
     // Compute reference
     const int cols       = src.shape()[0];
@@ -128,8 +128,6 @@
 
 template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src, float beta);
 template SimpleTensor<half> softmax_layer(const SimpleTensor<half> &src, float beta);
-template SimpleTensor<qint8_t> softmax_layer(const SimpleTensor<qint8_t> &src, float beta);
-template SimpleTensor<qint16_t> softmax_layer(const SimpleTensor<qint16_t> &src, float beta);
 } // namespace reference
 } // namespace validation
 } // namespace test
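As with GEMM and NormalizationLayer, the fixed-point softmax body survives this patch and only the QS instantiations go. The float reference computes the numerically stable form, subtracting the row maximum before exponentiating, with beta scaling the logits. Standalone single-row sketch:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Stable softmax over one row: exp((x - max) * beta), normalised to sum 1.
std::vector<float> softmax_ref(const std::vector<float> &src, float beta)
{
    const float max_val = *std::max_element(src.begin(), src.end());
    std::vector<float> dst(src.size());
    float sum = 0.f;
    for(std::size_t i = 0; i < src.size(); ++i)
    {
        dst[i] = std::exp((src[i] - max_val) * beta);
        sum += dst[i];
    }
    for(float &v : dst)
    {
        v /= sum;
    }
    return dst;
}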
diff --git a/tests/validation/reference/WidthConcatenateLayer.cpp b/tests/validation/reference/WidthConcatenateLayer.cpp
index fe79b4a..5b89934 100644
--- a/tests/validation/reference/WidthConcatenateLayer.cpp
+++ b/tests/validation/reference/WidthConcatenateLayer.cpp
@@ -85,8 +85,6 @@
 
 template SimpleTensor<float> widthconcatenate_layer(const std::vector<SimpleTensor<float>> &srcs);
 template SimpleTensor<half> widthconcatenate_layer(const std::vector<SimpleTensor<half>> &srcs);
-template SimpleTensor<qint8_t> widthconcatenate_layer(const std::vector<SimpleTensor<qint8_t>> &srcs);
-template SimpleTensor<qint16_t> widthconcatenate_layer(const std::vector<SimpleTensor<qint16_t>> &srcs);
 } // namespace reference
 } // namespace validation
 } // namespace test