COMPMID-970 : Remove QS8 / QS16 support

Removed fixed-point position arguments from test sources

Change-Id: I8343724723b71611fd501ed34de0866d3fb60e7e
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/136382
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
diff --git a/tests/benchmark/fixtures/ActivationLayerFixture.h b/tests/benchmark/fixtures/ActivationLayerFixture.h
index d46ef28..a82861f 100644
--- a/tests/benchmark/fixtures/ActivationLayerFixture.h
+++ b/tests/benchmark/fixtures/ActivationLayerFixture.h
@@ -48,10 +48,9 @@
         shape.set(shape.num_dimensions(), batches);
 
         // Create tensors
-        const int              fixed_point_position = 4;
         const QuantizationInfo q_info(0.5f, -10);
-        src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, q_info);
-        dst = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, q_info);
+        src = create_tensor<TensorType>(shape, data_type, 1, q_info);
+        dst = create_tensor<TensorType>(shape, data_type, 1, q_info);
 
         // Create and configure function
         act_layer.configure(&src, &dst, info);
diff --git a/tests/benchmark/fixtures/AlexNetFixture.h b/tests/benchmark/fixtures/AlexNetFixture.h
index 46ac61b..4662feb 100644
--- a/tests/benchmark/fixtures/AlexNetFixture.h
+++ b/tests/benchmark/fixtures/AlexNetFixture.h
@@ -53,10 +53,9 @@
     template <typename...>
     void setup(DataType data_type, int batches)
     {
-        constexpr bool weights_reshaped     = false;
-        constexpr int  fixed_point_position = 4;
+        constexpr bool weights_reshaped = false;
 
-        network.init(data_type, fixed_point_position, batches, weights_reshaped);
+        network.init(data_type, batches, weights_reshaped);
         network.build();
         network.allocate();
     }
diff --git a/tests/benchmark/fixtures/BatchNormalizationLayerFixture.h b/tests/benchmark/fixtures/BatchNormalizationLayerFixture.h
index ae8f8a7..42adefe 100644
--- a/tests/benchmark/fixtures/BatchNormalizationLayerFixture.h
+++ b/tests/benchmark/fixtures/BatchNormalizationLayerFixture.h
@@ -45,7 +45,7 @@
     void setup(TensorShape tensor_shape, TensorShape param_shape, float epsilon, bool use_gamma, bool use_beta, ActivationLayerInfo act_info, DataType data_type, DataLayout data_layout, int batches)
     {
         // Set batched in source and destination shapes
-        const unsigned int fixed_point_position = 4;
+
         tensor_shape.set(tensor_shape.num_dimensions(), batches);
         if(data_layout == DataLayout::NHWC)
         {
@@ -53,12 +53,12 @@
         }
 
         // Create tensors
-        src      = create_tensor<TensorType>(tensor_shape, data_type, 1, fixed_point_position, QuantizationInfo(), data_layout);
-        dst      = create_tensor<TensorType>(tensor_shape, data_type, 1, fixed_point_position, QuantizationInfo(), data_layout);
-        mean     = create_tensor<TensorType>(param_shape, data_type, 1, fixed_point_position);
-        variance = create_tensor<TensorType>(param_shape, data_type, 1, fixed_point_position);
-        beta     = create_tensor<TensorType>(param_shape, data_type, 1, fixed_point_position);
-        gamma    = create_tensor<TensorType>(param_shape, data_type, 1, fixed_point_position);
+        src      = create_tensor<TensorType>(tensor_shape, data_type, 1, QuantizationInfo(), data_layout);
+        dst      = create_tensor<TensorType>(tensor_shape, data_type, 1, QuantizationInfo(), data_layout);
+        mean     = create_tensor<TensorType>(param_shape, data_type, 1);
+        variance = create_tensor<TensorType>(param_shape, data_type, 1);
+        beta     = create_tensor<TensorType>(param_shape, data_type, 1);
+        gamma    = create_tensor<TensorType>(param_shape, data_type, 1);
 
         // Create and configure function
         TensorType *beta_ptr  = use_beta ? &beta : nullptr;
diff --git a/tests/benchmark/fixtures/ConvolutionLayerFixture.h b/tests/benchmark/fixtures/ConvolutionLayerFixture.h
index 511daf7..338a021 100644
--- a/tests/benchmark/fixtures/ConvolutionLayerFixture.h
+++ b/tests/benchmark/fixtures/ConvolutionLayerFixture.h
@@ -46,16 +46,16 @@
                int batches)
     {
         // Set batched in source and destination shapes
-        const unsigned int fixed_point_position = 4;
+
         src_shape.set(3 /* batch */, batches);
         dst_shape.set(3 /* batch */, batches);
         DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
 
         // Create tensors
-        src     = create_tensor<TensorType>(src_shape, data_type, 1, fixed_point_position);
-        weights = create_tensor<TensorType>(weights_shape, data_type, 1, fixed_point_position);
-        biases  = create_tensor<TensorType>(biases_shape, bias_data_type, 1, fixed_point_position);
-        dst     = create_tensor<TensorType>(dst_shape, data_type, 1, fixed_point_position);
+        src     = create_tensor<TensorType>(src_shape, data_type, 1);
+        weights = create_tensor<TensorType>(weights_shape, data_type, 1);
+        biases  = create_tensor<TensorType>(biases_shape, bias_data_type, 1);
+        dst     = create_tensor<TensorType>(dst_shape, data_type, 1);
 
         // Create and configure function
         conv_layer.configure(&src, &weights, &biases, &dst, info, WeightsInfo(), dilation, act_info);
diff --git a/tests/benchmark/fixtures/DepthConcatenateLayerFixture.h b/tests/benchmark/fixtures/DepthConcatenateLayerFixture.h
index bd4b404..292adde 100644
--- a/tests/benchmark/fixtures/DepthConcatenateLayerFixture.h
+++ b/tests/benchmark/fixtures/DepthConcatenateLayerFixture.h
@@ -95,12 +95,12 @@
 
         for(const auto &shape : src_shapes)
         {
-            _srcs.emplace_back(create_tensor<TensorType>(shape, data_type, 1, _fractional_bits));
+            _srcs.emplace_back(create_tensor<TensorType>(shape, data_type, 1));
             src_ptrs.emplace_back(&_srcs.back());
         }
 
         TensorShape dst_shape = calculate_depth_concatenate_shape(src_ptrs);
-        _dst                  = create_tensor<TensorType>(dst_shape, data_type, 1, _fractional_bits);
+        _dst                  = create_tensor<TensorType>(dst_shape, data_type, 1);
 
         _depth_concat.configure(src_ptrs, &_dst);
 
@@ -139,7 +139,6 @@
     std::vector<TensorType> _srcs{};
     TensorType              _dst{};
     Function                _depth_concat{};
-    int                     _fractional_bits{ 1 };
 };
 } // namespace benchmark
 } // namespace test
diff --git a/tests/benchmark/fixtures/DepthwiseConvolutionLayerFixture.h b/tests/benchmark/fixtures/DepthwiseConvolutionLayerFixture.h
index 9276431..48ea038 100644
--- a/tests/benchmark/fixtures/DepthwiseConvolutionLayerFixture.h
+++ b/tests/benchmark/fixtures/DepthwiseConvolutionLayerFixture.h
@@ -57,15 +57,15 @@
         weights_shape.set(2, dst_shape.z());
 
         // Set batched in source and destination shapes
-        const unsigned int fixed_point_position = 4;
+
         src_shape.set(3 /* batch */, batches);
         dst_shape.set(3 /* batch */, batches);
 
         // Create tensors
-        src     = create_tensor<TensorType>(src_shape, data_type, 1, fixed_point_position, QuantizationInfo(0.5f, 10));
-        weights = create_tensor<TensorType>(weights_shape, data_type, 1, fixed_point_position, QuantizationInfo(0.5f, 10));
-        biases  = create_tensor<TensorType>(TensorShape(weights_shape[2]), is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type, 1, fixed_point_position);
-        dst     = create_tensor<TensorType>(dst_shape, data_type, 1, fixed_point_position, QuantizationInfo(0.5f, 10));
+        src     = create_tensor<TensorType>(src_shape, data_type, 1, QuantizationInfo(0.5f, 10));
+        weights = create_tensor<TensorType>(weights_shape, data_type, 1, QuantizationInfo(0.5f, 10));
+        biases  = create_tensor<TensorType>(TensorShape(weights_shape[2]), is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type, 1);
+        dst     = create_tensor<TensorType>(dst_shape, data_type, 1, QuantizationInfo(0.5f, 10));
 
         // Create and configure function
         depth_conv.configure(&src, &weights, &biases, &dst, info);
diff --git a/tests/benchmark/fixtures/DepthwiseSeparableConvolutionLayerFixture.h b/tests/benchmark/fixtures/DepthwiseSeparableConvolutionLayerFixture.h
index ef1a407..927bb4d 100644
--- a/tests/benchmark/fixtures/DepthwiseSeparableConvolutionLayerFixture.h
+++ b/tests/benchmark/fixtures/DepthwiseSeparableConvolutionLayerFixture.h
@@ -47,18 +47,18 @@
                PadStrideInfo pad_stride_depthwise_info, PadStrideInfo pad_stride_pointwise_info, DataType data_type, int batches)
     {
         // Set batched in source and destination shapes
-        const unsigned int fixed_point_position = 4;
+
         src_shape.set(3 /* batch */, batches);
         depthwise_out_shape.set(3 /* batch */, batches);
         dst_shape.set(3 /* batch */, batches);
 
-        src               = create_tensor<TensorType>(src_shape, data_type, 1, fixed_point_position);
-        depthwise_weights = create_tensor<TensorType>(depthwise_weights_shape, data_type, 1, fixed_point_position);
-        depthwise_biases  = create_tensor<TensorType>(depthwise_biases_shape, data_type, 1, fixed_point_position);
-        depthwise_out     = create_tensor<TensorType>(depthwise_out_shape, data_type, 1, fixed_point_position);
-        pointwise_weights = create_tensor<TensorType>(pointwise_weights_shape, data_type, 1, fixed_point_position);
-        pointwise_biases  = create_tensor<TensorType>(pointwise_biases_shape, data_type, 1, fixed_point_position);
-        dst               = create_tensor<TensorType>(dst_shape, data_type, 1, fixed_point_position);
+        src               = create_tensor<TensorType>(src_shape, data_type, 1);
+        depthwise_weights = create_tensor<TensorType>(depthwise_weights_shape, data_type, 1);
+        depthwise_biases  = create_tensor<TensorType>(depthwise_biases_shape, data_type, 1);
+        depthwise_out     = create_tensor<TensorType>(depthwise_out_shape, data_type, 1);
+        pointwise_weights = create_tensor<TensorType>(pointwise_weights_shape, data_type, 1);
+        pointwise_biases  = create_tensor<TensorType>(pointwise_biases_shape, data_type, 1);
+        dst               = create_tensor<TensorType>(dst_shape, data_type, 1);
 
         // Create and configure function
         depth_sep_conv_layer.configure(&src, &depthwise_weights, &depthwise_biases, &depthwise_out, &pointwise_weights, &pointwise_biases, &dst, pad_stride_depthwise_info, pad_stride_pointwise_info);
diff --git a/tests/benchmark/fixtures/DirectConvolutionLayerFixture.h b/tests/benchmark/fixtures/DirectConvolutionLayerFixture.h
index 419f6dd..f74f0ec 100644
--- a/tests/benchmark/fixtures/DirectConvolutionLayerFixture.h
+++ b/tests/benchmark/fixtures/DirectConvolutionLayerFixture.h
@@ -49,16 +49,16 @@
         ARM_COMPUTE_UNUSED(dilation);
 
         // Set batched in source and destination shapes
-        const unsigned int fixed_point_position = 4;
+
         src_shape.set(3 /* batch */, batches);
         dst_shape.set(3 /* batch */, batches);
         DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
 
         // Create tensors
-        src     = create_tensor<TensorType>(src_shape, data_type, 1, fixed_point_position);
-        weights = create_tensor<TensorType>(weights_shape, data_type, 1, fixed_point_position);
-        biases  = create_tensor<TensorType>(biases_shape, bias_data_type, 1, fixed_point_position);
-        dst     = create_tensor<TensorType>(dst_shape, data_type, 1, fixed_point_position);
+        src     = create_tensor<TensorType>(src_shape, data_type, 1);
+        weights = create_tensor<TensorType>(weights_shape, data_type, 1);
+        biases  = create_tensor<TensorType>(biases_shape, bias_data_type, 1);
+        dst     = create_tensor<TensorType>(dst_shape, data_type, 1);
 
         // Create and configure function
         conv_layer.configure(&src, &weights, &biases, &dst, info, act_info);
diff --git a/tests/benchmark/fixtures/FlattenLayerFixture.h b/tests/benchmark/fixtures/FlattenLayerFixture.h
index 749fa0d..3d46989 100644
--- a/tests/benchmark/fixtures/FlattenLayerFixture.h
+++ b/tests/benchmark/fixtures/FlattenLayerFixture.h
@@ -46,11 +46,9 @@
         TensorShape shape_flatten(shape);
         shape_flatten.collapse(3);
 
-        const unsigned int fixed_point_position = is_data_type_fixed_point(data_type) ? 4 : 0;
-
         // Create tensors
-        src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position);
-        dst = create_tensor<TensorType>(shape_flatten, data_type, 1, fixed_point_position);
+        src = create_tensor<TensorType>(shape, data_type, 1);
+        dst = create_tensor<TensorType>(shape_flatten, data_type, 1);
 
         // Create and configure function
         flatten_func.configure(&src, &dst);
diff --git a/tests/benchmark/fixtures/FullyConnectedLayerFixture.h b/tests/benchmark/fixtures/FullyConnectedLayerFixture.h
index e7a5260..caef5be 100644
--- a/tests/benchmark/fixtures/FullyConnectedLayerFixture.h
+++ b/tests/benchmark/fixtures/FullyConnectedLayerFixture.h
@@ -45,15 +45,15 @@
     void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape biases_shape, TensorShape dst_shape, DataType data_type, int batches)
     {
         // Set batched in source and destination shapes
-        const unsigned int fixed_point_position = 4;
+
         src_shape.set(src_shape.num_dimensions() /* batch */, batches);
         dst_shape.set(dst_shape.num_dimensions() /* batch */, batches);
 
         // Create tensors
-        src     = create_tensor<TensorType>(src_shape, data_type, 1, fixed_point_position);
-        weights = create_tensor<TensorType>(weights_shape, data_type, 1, fixed_point_position);
-        biases  = create_tensor<TensorType>(biases_shape, data_type, 1, fixed_point_position);
-        dst     = create_tensor<TensorType>(dst_shape, data_type, 1, fixed_point_position);
+        src     = create_tensor<TensorType>(src_shape, data_type, 1);
+        weights = create_tensor<TensorType>(weights_shape, data_type, 1);
+        biases  = create_tensor<TensorType>(biases_shape, data_type, 1);
+        dst     = create_tensor<TensorType>(dst_shape, data_type, 1);
 
         // Create and configure function
         fc_layer.configure(&src, &weights, &biases, &dst);
diff --git a/tests/benchmark/fixtures/GEMMFixture.h b/tests/benchmark/fixtures/GEMMFixture.h
index f706f3e..7628abc 100644
--- a/tests/benchmark/fixtures/GEMMFixture.h
+++ b/tests/benchmark/fixtures/GEMMFixture.h
@@ -44,13 +44,11 @@
     template <typename...>
     void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape shape_dst, float alpha, float beta, DataType data_type, bool reshape_b_only_on_first_run)
     {
-        constexpr int fixed_point_position = 4;
-
         // Create tensors
-        a   = create_tensor<TensorType>(shape_a, data_type, 1, fixed_point_position);
-        b   = create_tensor<TensorType>(shape_b, data_type, 1, fixed_point_position);
-        c   = create_tensor<TensorType>(shape_c, data_type, 1, fixed_point_position);
-        dst = create_tensor<TensorType>(shape_dst, data_type, 1, fixed_point_position);
+        a   = create_tensor<TensorType>(shape_a, data_type, 1);
+        b   = create_tensor<TensorType>(shape_b, data_type, 1);
+        c   = create_tensor<TensorType>(shape_c, data_type, 1);
+        dst = create_tensor<TensorType>(shape_dst, data_type, 1);
 
         // Create and configure function
         gemm.configure(&a, &b, &c, &dst, alpha, beta, GEMMInfo(false, false, reshape_b_only_on_first_run));
diff --git a/tests/benchmark/fixtures/GEMMInterleave4x4Fixture.h b/tests/benchmark/fixtures/GEMMInterleave4x4Fixture.h
index 793c540..c8e6f4a 100644
--- a/tests/benchmark/fixtures/GEMMInterleave4x4Fixture.h
+++ b/tests/benchmark/fixtures/GEMMInterleave4x4Fixture.h
@@ -44,14 +44,12 @@
     template <typename...>
     void setup(size_t x, size_t y, DataType data_type)
     {
-        constexpr int fixed_point_position = 4;
-
         const TensorShape shape_a(x, y);
         const TensorShape shape_b(static_cast<size_t>(x * 4.f), static_cast<size_t>(std::ceil(y / 4.f)));
 
         // Create tensors
-        a = create_tensor<TensorType>(shape_a, data_type, 1, fixed_point_position);
-        b = create_tensor<TensorType>(shape_b, data_type, 1, fixed_point_position);
+        a = create_tensor<TensorType>(shape_a, data_type, 1);
+        b = create_tensor<TensorType>(shape_b, data_type, 1);
 
         // Create and configure function
         gemm.configure(&a, &b);
diff --git a/tests/benchmark/fixtures/GEMMLowpFixture.h b/tests/benchmark/fixtures/GEMMLowpFixture.h
index b5381b0..46a2f5c 100644
--- a/tests/benchmark/fixtures/GEMMLowpFixture.h
+++ b/tests/benchmark/fixtures/GEMMLowpFixture.h
@@ -53,9 +53,9 @@
         // Note: The offsets for matrix A and matrix B are set to 0 in order to skip the computation for the offset contribution
 
         // Create tensors
-        a = create_tensor<TensorType>(shape_a, DataType::QASYMM8, 1, 0, QuantizationInfo(1.0f / 255.0f, 0));
-        b = create_tensor<TensorType>(shape_b, DataType::QASYMM8, 1, 0, QuantizationInfo(1.0f / 255.0f, 0));
-        c = create_tensor<TensorType>(shape_dst, DataType::S32, 1, 0, QuantizationInfo(1.0f / 255.0f, 0));
+        a = create_tensor<TensorType>(shape_a, DataType::QASYMM8, 1, QuantizationInfo(1.0f / 255.0f, 0));
+        b = create_tensor<TensorType>(shape_b, DataType::QASYMM8, 1, QuantizationInfo(1.0f / 255.0f, 0));
+        c = create_tensor<TensorType>(shape_dst, DataType::S32, 1, QuantizationInfo(1.0f / 255.0f, 0));
 
         // Create and configure function
         gemmlowp.configure(&a, &b, &c);
diff --git a/tests/benchmark/fixtures/NormalizationLayerFixture.h b/tests/benchmark/fixtures/NormalizationLayerFixture.h
index 7742dca..4331506 100644
--- a/tests/benchmark/fixtures/NormalizationLayerFixture.h
+++ b/tests/benchmark/fixtures/NormalizationLayerFixture.h
@@ -45,12 +45,12 @@
     void setup(TensorShape shape, NormalizationLayerInfo info, DataType data_type, int batches)
     {
         // Set batched in source and destination shapes
-        const unsigned int fixed_point_position = 4;
+
         shape.set(shape.num_dimensions(), batches);
 
         // Create tensors
-        src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position);
-        dst = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position);
+        src = create_tensor<TensorType>(shape, data_type, 1);
+        dst = create_tensor<TensorType>(shape, data_type, 1);
 
         // Create and configure function
         norm_layer.configure(&src, &dst, info);
diff --git a/tests/benchmark/fixtures/PoolingLayerFixture.h b/tests/benchmark/fixtures/PoolingLayerFixture.h
index 5a1a296..cbcfe2e 100644
--- a/tests/benchmark/fixtures/PoolingLayerFixture.h
+++ b/tests/benchmark/fixtures/PoolingLayerFixture.h
@@ -48,7 +48,6 @@
     void setup(TensorShape src_shape, PoolingLayerInfo info, DataType data_type, DataLayout data_layout, int batches)
     {
         // Set batched in source and destination shapes
-        const unsigned int fixed_point_position = 4;
 
         // Permute shape if NHWC format
         if(data_layout == DataLayout::NHWC)
@@ -56,7 +55,7 @@
             permute(src_shape, PermutationVector(2U, 0U, 1U));
         }
 
-        TensorInfo src_info(src_shape, 1, data_type, fixed_point_position);
+        TensorInfo src_info(src_shape, 1, data_type);
         src_info.set_data_layout(data_layout);
 
         TensorShape dst_shape = compute_pool_shape(src_info, info);
@@ -65,8 +64,8 @@
         dst_shape.set(dst_shape.num_dimensions(), batches);
 
         // Create tensors
-        src = create_tensor<TensorType>(src_shape, data_type, 1, fixed_point_position, QuantizationInfo(), data_layout);
-        dst = create_tensor<TensorType>(dst_shape, data_type, 1, fixed_point_position, QuantizationInfo(), data_layout);
+        src = create_tensor<TensorType>(src_shape, data_type, 1, QuantizationInfo(), data_layout);
+        dst = create_tensor<TensorType>(dst_shape, data_type, 1, QuantizationInfo(), data_layout);
 
         // Create and configure function
         pool_layer.configure(&src, &dst, info);
diff --git a/tests/benchmark/fixtures/ROIPoolingLayerFixture.h b/tests/benchmark/fixtures/ROIPoolingLayerFixture.h
index 4adfa44..fa4a5b7 100644
--- a/tests/benchmark/fixtures/ROIPoolingLayerFixture.h
+++ b/tests/benchmark/fixtures/ROIPoolingLayerFixture.h
@@ -47,8 +47,8 @@
     void setup(TensorShape shape, const ROIPoolingLayerInfo pool_info, unsigned int num_rois, DataType data_type, int batches)
     {
         // Set batched in source and destination shapes
-        const unsigned int fixed_point_position = 4;
-        TensorShape        shape_dst;
+
+        TensorShape shape_dst;
         shape.set(shape.num_dimensions(), batches);
         shape_dst.set(0, pool_info.pooled_width());
         shape_dst.set(1, pool_info.pooled_height());
@@ -56,8 +56,8 @@
         shape_dst.set(3, num_rois);
 
         // Create tensors
-        src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position);
-        dst = create_tensor<TensorType>(shape_dst, data_type, 1, fixed_point_position);
+        src = create_tensor<TensorType>(shape, data_type, 1);
+        dst = create_tensor<TensorType>(shape_dst, data_type, 1);
 
         // Create random ROIs
         std::vector<ROI> rois = generate_random_rois(shape, pool_info, num_rois, 0U);
diff --git a/tests/benchmark/fixtures/SoftmaxLayerFixture.h b/tests/benchmark/fixtures/SoftmaxLayerFixture.h
index 4f6dde8..4d092f7 100644
--- a/tests/benchmark/fixtures/SoftmaxLayerFixture.h
+++ b/tests/benchmark/fixtures/SoftmaxLayerFixture.h
@@ -45,11 +45,9 @@
     template <typename...>
     void setup(TensorShape shape, DataType data_type)
     {
-        const unsigned int fixed_point_position = 4;
-
         // Create tensors
-        src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, QuantizationInfo(1.f / 256, 10));
-        dst = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, QuantizationInfo(1.f / 256, 0));
+        src = create_tensor<TensorType>(shape, data_type, 1, QuantizationInfo(1.f / 256, 10));
+        dst = create_tensor<TensorType>(shape, data_type, 1, QuantizationInfo(1.f / 256, 0));
 
         ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
diff --git a/tests/benchmark/fixtures/WinogradConvolutionLayerFixture.h b/tests/benchmark/fixtures/WinogradConvolutionLayerFixture.h
index 8ed75af..5f44517 100644
--- a/tests/benchmark/fixtures/WinogradConvolutionLayerFixture.h
+++ b/tests/benchmark/fixtures/WinogradConvolutionLayerFixture.h
@@ -48,16 +48,16 @@
         ARM_COMPUTE_UNUSED(dilation);
 
         // Set batched in source and destination shapes
-        const unsigned int fixed_point_position = 4;
+
         src_shape.set(3 /* batch */, batches);
         dst_shape.set(3 /* batch */, batches);
         DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
 
         // Create tensors
-        src     = create_tensor<TensorType>(src_shape, data_type, 1, fixed_point_position);
-        weights = create_tensor<TensorType>(weights_shape, data_type, 1, fixed_point_position);
-        biases  = create_tensor<TensorType>(biases_shape, bias_data_type, 1, fixed_point_position);
-        dst     = create_tensor<TensorType>(dst_shape, data_type, 1, fixed_point_position);
+        src     = create_tensor<TensorType>(src_shape, data_type, 1);
+        weights = create_tensor<TensorType>(weights_shape, data_type, 1);
+        biases  = create_tensor<TensorType>(biases_shape, bias_data_type, 1);
+        dst     = create_tensor<TensorType>(dst_shape, data_type, 1);
 
         // Create and configure function
         conv_layer.configure(&src, &weights, &biases, &dst, info, act_info);