COMPMID-1586: Add support for NHWC CLDeconvolutionLayer

COMPMID-1651: Fix QASYMM8 CLDeconvolutionLayer

This patch also extends the range of input values used for testing the
Convolution and Deconvolution layers to cover the quantized equivalent of
[-1.0f, 1.0f].
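
For reference, a minimal sketch (not part of the patch) of what the new
get_quantized_bounds() helper computes, assuming the
QuantizationInfo(2.f / 255.f, 0) used by the QASYMM8 tests below:

    const QuantizationInfo    qinfo(2.f / 255.f, 0);
    const std::pair<int, int> bounds = get_quantized_bounds(qinfo, -1.0f, 1.0f);
    // quantize(v) = round(v / scale) + offset, clamped to [0, 255]:
    //   bounds.first  == 0   (-1.0f / (2.f / 255.f) = -127.5, clamped to 0)
    //   bounds.second == 128 ( 1.0f / (2.f / 255.f) =  127.5, rounded up to 128)
    std::uniform_int_distribution<uint8_t> distribution(bounds.first, bounds.second);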

Change-Id: I8b280669db67bb3ec25bf5d411c8f5954f5b0dab
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/149869
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Tested-by: bsgcomp <bsgcomp@arm.com>
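
A note on the NHWC path (a sketch, not part of the patch): the deconvolution
fixture still generates NCHW test shapes and permutes them before creating
tensors. Arm Compute stores an NCHW shape as (W, H, C, N), so moving the
channel dimension to the front yields NHWC storage:

    TensorShape shape(8U, 8U, 16U, 2U);            // NCHW: W=8, H=8, C=16, N=2
    permute(shape, PermutationVector(2U, 0U, 1U)); // NHWC: (16, 8, 8, 2)
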
diff --git a/tests/validation/CL/DeconvolutionLayer.cpp b/tests/validation/CL/DeconvolutionLayer.cpp
index 84a2b01..7727d90 100644
--- a/tests/validation/CL/DeconvolutionLayer.cpp
+++ b/tests/validation/CL/DeconvolutionLayer.cpp
@@ -23,6 +23,7 @@
  */
 #include "arm_compute/core/CL/kernels/CLFillBorderKernel.h"
 #include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "arm_compute/runtime/CL/CLTensor.h"
 #include "arm_compute/runtime/CL/CLTensorAllocator.h"
 #include "arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h"
@@ -45,7 +46,7 @@
 {
 constexpr AbsoluteTolerance<float>  tolerance_fp32(0.001f);               /**< Tolerance for floating point tests */
 RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's for DataType::F16 */
-constexpr AbsoluteTolerance<float>  tolerance_qasymm8(1.0);               /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
+constexpr AbsoluteTolerance<float>  tolerance_qasymm8(0.0);               /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
 constexpr float                     tolerance_num = 0.07f;                /**< Tolerance number */
 
 const auto data4x4 = datasets::SmallDeconvolutionShapes() * framework::dataset::make("StrideX", 1, 5) * framework::dataset::make("StrideY", 1, 5) * framework::dataset::make("PadX", 0, 3)
@@ -57,6 +58,7 @@
 const auto data1x1 = datasets::SmallDeconvolutionShapes() * framework::dataset::make("StrideX", 1, 4) * framework::dataset::make("StrideY", 1, 4) * framework::dataset::make("PadX", 0, 1)
                      * framework::dataset::make("PadY", 0, 1) * framework::dataset::make("ax", 0) * framework::dataset::make("ay", 0) * framework::dataset::make("NumKernels", { 1, 3 });
 
+const auto data_layouts_dataset = framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC });
 } // namespace
 
 TEST_SUITE(CL)
@@ -72,7 +74,7 @@
     const TensorShape  weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
     const TensorShape  bias_shape(num_kernels);
     auto               out_dim      = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, 1, 1, 1, 1);
-    TensorShape        output_shape = deconvolution_output_shape(out_dim, input_shape, weights_shape);
+    TensorShape        output_shape = compute_deconvolution_output_shape(out_dim, TensorInfo(input_shape, 1, data_type), TensorInfo(weights_shape, 1, data_type));
 
     // Create tensors
     CLTensor src     = create_tensor<CLTensor>(input_shape, data_type, 1);
@@ -169,7 +171,7 @@
 TEST_SUITE(FP32)
 
 TEST_SUITE(W4x4)
-FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture4x4<float>, framework::DatasetMode::ALL, combine(data4x4, framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture4x4<float>, framework::DatasetMode::ALL, combine(combine(data4x4, framework::dataset::make("DataType", DataType::F32)), data_layouts_dataset))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_fp32);
@@ -177,7 +179,7 @@
 TEST_SUITE_END()
 
 TEST_SUITE(W3x3)
-FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture3x3<float>, framework::DatasetMode::ALL, combine(data3x3, framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture3x3<float>, framework::DatasetMode::ALL, combine(combine(data3x3, framework::dataset::make("DataType", DataType::F32)), data_layouts_dataset))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_fp32);
@@ -185,7 +187,7 @@
 TEST_SUITE_END()
 
 TEST_SUITE(W1x1)
-FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture1x1<float>, framework::DatasetMode::ALL, combine(data1x1, framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture1x1<float>, framework::DatasetMode::ALL, combine(combine(data1x1, framework::dataset::make("DataType", DataType::F32)), data_layouts_dataset))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_fp32);
@@ -197,7 +199,7 @@
 TEST_SUITE(FP16)
 
 TEST_SUITE(W4x4)
-FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture4x4<half>, framework::DatasetMode::ALL, combine(data4x4, framework::dataset::make("DataType", DataType::F16)))
+FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture4x4<half>, framework::DatasetMode::ALL, combine(combine(data4x4, framework::dataset::make("DataType", DataType::F16)), data_layouts_dataset))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
@@ -205,7 +207,7 @@
 TEST_SUITE_END()
 
 TEST_SUITE(W3x3)
-FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture3x3<half>, framework::DatasetMode::ALL, combine(data3x3, framework::dataset::make("DataType", DataType::F16)))
+FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture3x3<half>, framework::DatasetMode::ALL, combine(combine(data3x3, framework::dataset::make("DataType", DataType::F16)), data_layouts_dataset))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
@@ -213,7 +215,7 @@
 TEST_SUITE_END()
 
 TEST_SUITE(W1x1)
-FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture1x1<half>, framework::DatasetMode::ALL, combine(data1x1, framework::dataset::make("DataType", DataType::F16)))
+FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture1x1<half>, framework::DatasetMode::ALL, combine(combine(data1x1, framework::dataset::make("DataType", DataType::F16)), data_layouts_dataset))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
@@ -236,7 +238,8 @@
 TEST_SUITE(QASYMM8)
 
 TEST_SUITE(W4x4)
-FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerQuantizedFixture4x4<uint8_t>, framework::DatasetMode::ALL, combine(combine(data4x4, framework::dataset::make("DataType", DataType::QASYMM8)),
+FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerQuantizedFixture4x4<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(data4x4, framework::dataset::make("DataType", DataType::QASYMM8)),
+                                                                                                                   data_layouts_dataset),
                                                                                                                    framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 0))))
 {
     // Validate output
@@ -245,7 +248,8 @@
 TEST_SUITE_END()
 
 TEST_SUITE(W3x3)
-FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerQuantizedFixture3x3<uint8_t>, framework::DatasetMode::ALL, combine(combine(data3x3, framework::dataset::make("DataType", DataType::QASYMM8)),
+FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerQuantizedFixture3x3<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(data3x3, framework::dataset::make("DataType", DataType::QASYMM8)),
+                                                                                                                   data_layouts_dataset),
                                                                                                                    framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 0))))
 {
     // Validate output
@@ -254,7 +258,8 @@
 TEST_SUITE_END()
 
 TEST_SUITE(W1x1)
-FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerQuantizedFixture1x1<uint8_t>, framework::DatasetMode::ALL, combine(combine(data1x1, framework::dataset::make("DataType", DataType::QASYMM8)),
+FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerQuantizedFixture1x1<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(data1x1, framework::dataset::make("DataType", DataType::QASYMM8)),
+                                                                                                                   data_layouts_dataset),
                                                                                                                    framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 0))))
 {
     // Validate output
diff --git a/tests/validation/Helpers.cpp b/tests/validation/Helpers.cpp
index fd034b6..eab6d56 100644
--- a/tests/validation/Helpers.cpp
+++ b/tests/validation/Helpers.cpp
@@ -302,6 +302,15 @@
     }
 }
 
+std::pair<int, int> get_quantized_bounds(const QuantizationInfo &quant_info, float min, float max)
+{
+    ARM_COMPUTE_ERROR_ON_MSG(min > max, "min must be lower than or equal to max");
+
+    const int min_bound = quant_info.quantize(min, RoundingPolicy::TO_NEAREST_UP);
+    const int max_bound = quant_info.quantize(max, RoundingPolicy::TO_NEAREST_UP);
+    return std::pair<int, int>(min_bound, max_bound);
+}
+
 template void get_tile(const SimpleTensor<float> &in, SimpleTensor<float> &roi, const Coordinates &coord);
 template void get_tile(const SimpleTensor<half> &in, SimpleTensor<half> &roi, const Coordinates &coord);
 template void zeros(SimpleTensor<float> &in, const Coordinates &anchor, const TensorShape &shape);
diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
index 779ecdc..4d1d214 100644
--- a/tests/validation/Helpers.h
+++ b/tests/validation/Helpers.h
@@ -231,6 +231,16 @@
  */
 template <typename T>
 void zeros(SimpleTensor<T> &in, const Coordinates &anchor, const TensorShape &shape);
+
+/** Helper function to compute quantized min and max bounds
+ *
+ * @param[in] quant_info Quantization info to be used for conversion
+ * @param[in] min        Floating point minimum value to be quantized
+ * @param[in] max        Floating point maximum value to be quantized
+ *
+ * @return A pair holding the quantized minimum and maximum bounds
+ */
+std::pair<int, int> get_quantized_bounds(const QuantizationInfo &quant_info, float min, float max);
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/validation/NEON/DeconvolutionLayer.cpp b/tests/validation/NEON/DeconvolutionLayer.cpp
index eb643b8..1b74400 100644
--- a/tests/validation/NEON/DeconvolutionLayer.cpp
+++ b/tests/validation/NEON/DeconvolutionLayer.cpp
@@ -22,6 +22,7 @@
  * SOFTWARE.
  */
 #include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h"
 #include "arm_compute/runtime/Tensor.h"
 #include "arm_compute/runtime/TensorAllocator.h"
@@ -53,6 +54,7 @@
 const auto data1x1 = datasets::SmallDeconvolutionShapes() * framework::dataset::make("StrideX", 1, 4) * framework::dataset::make("StrideY", 1, 4) * framework::dataset::make("PadX", 0, 1)
                      * framework::dataset::make("PadY", 0, 1) * framework::dataset::make("ax", 0) * framework::dataset::make("ay", 0) * framework::dataset::make("NumKernels", { 1, 3 });
 
+const auto data_layouts_dataset = framework::dataset::make("DataLayout", { DataLayout::NCHW });
 } // namespace
 
 TEST_SUITE(NEON)
@@ -68,7 +70,7 @@
     const TensorShape  weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
     const TensorShape  bias_shape(num_kernels);
     auto               out_dim      = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, 1, 1, 1, 1);
-    TensorShape        output_shape = deconvolution_output_shape(out_dim, input_shape, weights_shape);
+    TensorShape        output_shape = compute_deconvolution_output_shape(out_dim, TensorInfo(input_shape, 1, data_type), TensorInfo(weights_shape, 1, data_type));
 
     // Create tensors
     Tensor src     = create_tensor<Tensor>(input_shape, data_type, 1);
@@ -172,7 +174,7 @@
 TEST_SUITE(FP32)
 TEST_SUITE(W4x4)
 
-FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerFixture4x4<float>, framework::DatasetMode::ALL, combine(data4x4, framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerFixture4x4<float>, framework::DatasetMode::ALL, combine(combine(data4x4, framework::dataset::make("DataType", DataType::F32)), data_layouts_dataset))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_fp32);
@@ -181,7 +183,7 @@
 
 TEST_SUITE(W3x3)
 
-FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerFixture3x3<float>, framework::DatasetMode::ALL, combine(data3x3, framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerFixture3x3<float>, framework::DatasetMode::ALL, combine(combine(data3x3, framework::dataset::make("DataType", DataType::F32)), data_layouts_dataset))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_fp32);
@@ -189,7 +191,7 @@
 TEST_SUITE_END()
 
 TEST_SUITE(W1x1)
-FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerFixture1x1<float>, framework::DatasetMode::ALL, combine(data1x1, framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerFixture1x1<float>, framework::DatasetMode::ALL, combine(combine(data1x1, framework::dataset::make("DataType", DataType::F32)), data_layouts_dataset))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_fp32);
diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index 3b420ea..795b9de 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -77,7 +77,8 @@
         {
             case DataType::QASYMM8:
             {
-                std::uniform_int_distribution<uint8_t> distribution(0, 3);
+                std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
+                std::uniform_int_distribution<uint8_t> distribution(bounds.first, bounds.second);
                 library->fill(tensor, distribution, i);
                 break;
             }
diff --git a/tests/validation/fixtures/DeconvolutionLayerFixture.h b/tests/validation/fixtures/DeconvolutionLayerFixture.h
index d3a7be7..85c7ed5 100644
--- a/tests/validation/fixtures/DeconvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DeconvolutionLayerFixture.h
@@ -23,6 +23,7 @@
  */
 #include "arm_compute/core/TensorShape.h"
 #include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "tests/AssetsLibrary.h"
 #include "tests/Globals.h"
 #include "tests/IAccessor.h"
@@ -39,6 +40,8 @@
 {
 namespace validation
 {
+using namespace arm_compute::misc::shape_calculator;
+
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
 class DeconvolutionLayerFixtureBase : public framework::Fixture
 {
@@ -48,12 +51,15 @@
 public:
     template <typename...>
     void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info,
-               const std::pair<unsigned int, unsigned int> &inner_border, DataType data_type, QuantizationInfo quantization_info)
+               const std::pair<unsigned int, unsigned int> &inner_border, DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info)
     {
-        _data_type = data_type;
+        _data_type         = data_type;
+        _bias_data_type    = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
+        _data_layout       = data_layout;
+        _quantization_info = quantization_info;
 
-        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, quantization_info);
-        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, quantization_info);
+        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, inner_border);
+        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, inner_border);
     }
 
 protected:
@@ -64,7 +70,8 @@
         {
             case DataType::QASYMM8:
             {
-                std::uniform_int_distribution<uint8_t> distribution(0, 3);
+                std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
+                std::uniform_int_distribution<uint8_t> distribution(bounds.first, bounds.second);
                 library->fill(tensor, distribution, i);
                 break;
             }
@@ -86,14 +93,21 @@
         }
     }
 
-    TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape,
-                              const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> &inner_border, DataType data_type, QuantizationInfo quantization_info)
+    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape,
+                              const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> &inner_border)
     {
+        if(_data_layout == DataLayout::NHWC)
+        {
+            permute(input_shape, PermutationVector(2U, 0U, 1U));
+            permute(weights_shape, PermutationVector(2U, 0U, 1U));
+            permute(output_shape, PermutationVector(2U, 0U, 1U));
+        }
+
         // Create tensors
-        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info);
-        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, quantization_info);
-        TensorType bias    = create_tensor<TensorType>(bias_shape, is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type, 1, quantization_info);
-        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info);
+        TensorType src     = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info, _data_layout);
+        TensorType weights = create_tensor<TensorType>(weights_shape, _data_type, 1, _quantization_info, _data_layout);
+        TensorType bias    = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info, _data_layout);
+        TensorType dst     = create_tensor<TensorType>(output_shape, _data_type, 1, _quantization_info, _data_layout);
 
         // Create and configure function
         FunctionType conv;
@@ -127,12 +141,12 @@
     }
 
     SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape,
-                                      const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> inner_border, DataType data_type, QuantizationInfo quantization_info)
+                                      const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> inner_border)
     {
         // Create reference
-        SimpleTensor<T>     src{ input_shape, data_type, 1, quantization_info };
-        SimpleTensor<T>     weights{ weights_shape, data_type, 1, quantization_info };
-        SimpleTensor<TBias> bias{ bias_shape, data_type, 1, quantization_info };
+        SimpleTensor<T>     src{ input_shape, _data_type, 1, _quantization_info };
+        SimpleTensor<T>     weights{ weights_shape, _data_type, 1, _quantization_info };
+        SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _quantization_info };
 
         // Fill reference
         fill(src, 0);
@@ -142,9 +156,12 @@
         return reference::deconvolution_layer<T>(src, weights, bias, output_shape, info, inner_border);
     }
 
-    TensorType      _target{};
-    SimpleTensor<T> _reference{};
-    DataType        _data_type{};
+    TensorType       _target{};
+    SimpleTensor<T>  _reference{};
+    DataType         _data_type{};
+    DataType         _bias_data_type{};
+    DataLayout       _data_layout{};
+    QuantizationInfo _quantization_info{};
 };
 
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T, unsigned int kernel_size_x, unsigned int kernel_size_y>
@@ -153,16 +170,18 @@
 public:
     template <typename...>
     void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady,
-               unsigned int inner_border_right, unsigned int inner_border_top, unsigned int num_kernels, DataType data_type)
+               unsigned int inner_border_right, unsigned int inner_border_top, unsigned int num_kernels, DataType data_type, DataLayout data_layout)
     {
         ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported");
         const TensorShape   weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
         const TensorShape   bias_shape(num_kernels);
         const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL);
         const std::pair<unsigned int, unsigned int> inner_border(inner_border_right, inner_border_top);
-        auto        out_dim      = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, padx, pady, sx, sy);
-        TensorShape output_shape = deconvolution_output_shape(out_dim, input_shape, weights_shape);
-        DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, QuantizationInfo());
+        auto        out_dim = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, padx, pady, sx, sy);
+        TensorInfo  input_info(input_shape, 1, data_type);
+        TensorInfo  weights_info(weights_shape, 1, data_type);
+        TensorShape output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info);
+        DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, data_layout, QuantizationInfo());
     }
 };
 
@@ -172,16 +191,18 @@
 public:
     template <typename...>
     void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady,
-               unsigned int inner_border_right, unsigned int inner_border_top, unsigned int num_kernels, DataType data_type, QuantizationInfo quantization_info)
+               unsigned int inner_border_right, unsigned int inner_border_top, unsigned int num_kernels, DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info)
     {
         ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported");
         const TensorShape   weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
         const TensorShape   bias_shape(num_kernels);
         const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL);
         const std::pair<unsigned int, unsigned int> inner_border(inner_border_right, inner_border_top);
-        auto        out_dim      = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, padx, pady, sx, sy);
-        TensorShape output_shape = deconvolution_output_shape(out_dim, input_shape, weights_shape);
-        DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, quantization_info);
+        auto        out_dim = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, padx, pady, sx, sy);
+        TensorInfo  input_info(input_shape, 1, data_type, quantization_info);
+        TensorInfo  weights_info(weights_shape, 1, data_type, quantization_info);
+        TensorShape output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info);
+        DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, data_layout, quantization_info);
     }
 };
 
diff --git a/tests/validation/fixtures/NormalizePlanarYUVLayerFixture.h b/tests/validation/fixtures/NormalizePlanarYUVLayerFixture.h
index 3bb935e..93e4e64 100644
--- a/tests/validation/fixtures/NormalizePlanarYUVLayerFixture.h
+++ b/tests/validation/fixtures/NormalizePlanarYUVLayerFixture.h
@@ -68,11 +68,10 @@
         }
         else if(is_data_type_quantized_asymmetric(_data_type))
         {
-            const QuantizationInfo          quant_info = src_tensor.quantization_info();
-            const int                       min_bound  = quant_info.quantize(-1.f, RoundingPolicy::TO_NEAREST_UP);
-            const int                       max_bound  = quant_info.quantize(1.f, RoundingPolicy::TO_NEAREST_UP);
-            std::uniform_int_distribution<> distribution(min_bound, max_bound);
-            std::uniform_int_distribution<> distribution_std(quant_info.quantize(0.1f, RoundingPolicy::TO_NEAREST_UP), max_bound);
+            const QuantizationInfo          quant_info = src_tensor.quantization_info();
+            const std::pair<int, int>       bounds     = get_quantized_bounds(quant_info, -1.0f, 1.0f);
+            std::uniform_int_distribution<> distribution(bounds.first, bounds.second);
+            std::uniform_int_distribution<> distribution_std(quant_info.quantize(0.1f, RoundingPolicy::TO_NEAREST_UP), bounds.second);
             library->fill(src_tensor, distribution, 0);
             library->fill(mean_tensor, distribution, 1);
             library->fill(std_tensor, distribution_std, 2);
diff --git a/tests/validation/fixtures/ReduceMeanFixture.h b/tests/validation/fixtures/ReduceMeanFixture.h
index 6debd4a..8692213 100644
--- a/tests/validation/fixtures/ReduceMeanFixture.h
+++ b/tests/validation/fixtures/ReduceMeanFixture.h
@@ -32,6 +32,7 @@
 #include "tests/IAccessor.h"
 #include "tests/framework/Asserts.h"
 #include "tests/framework/Fixture.h"
+#include "tests/validation/Helpers.h"
 #include "tests/validation/reference/ReductionOperation.h"
 #include "tests/validation/reference/ReshapeLayer.h"
 
@@ -63,10 +64,8 @@
         }
         else
         {
-            const QuantizationInfo          quant_info = tensor.quantization_info();
-            const int                       min_bound  = quant_info.quantize(-1.f, RoundingPolicy::TO_NEAREST_UP);
-            const int                       max_bound  = quant_info.quantize(1.f, RoundingPolicy::TO_NEAREST_UP);
-            std::uniform_int_distribution<> distribution(min_bound, max_bound);
+            std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
+            std::uniform_int_distribution<> distribution(bounds.first, bounds.second);
 
             library->fill(tensor, distribution, 0);
         }