COMPMID-1376: Add support for QASYMM8 in CLDeconvolutionLayer

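Extend the deconvolution test fixtures to create tensors with a
QuantizationInfo, fill QASYMM8 and S32 tensors with suitable distributions,
and add a DeconvolutionValidationQuantizedFixture for quantized runs.

As a minimal usage sketch (not part of this patch), a QASYMM8 deconvolution
through the public CL runtime API might look as follows; the shapes and the
(scale, offset) quantization parameters are illustrative, and the configure()
overload with the inner-border arguments is assumed to be the one exercised
by the fixture below:

    #include "arm_compute/runtime/CL/CLFunctions.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init();

        const QuantizationInfo qinfo(0.5f, 10); // example (scale, offset)
        const auto make_info = [&](TensorShape shape, DataType dt) -> TensorInfo
        {
            TensorInfo info(std::move(shape), 1, dt);
            info.set_quantization_info(qinfo);
            return info;
        };

        // 4x4 QASYMM8 input, one 3x3 kernel, stride 1, pad 1 -> 4x4 output.
        CLTensor src, weights, bias, dst;
        src.allocator()->init(make_info(TensorShape(4U, 4U, 1U), DataType::QASYMM8));
        weights.allocator()->init(make_info(TensorShape(3U, 3U, 1U, 1U), DataType::QASYMM8));
        bias.allocator()->init(TensorInfo(TensorShape(1U), 1, DataType::S32)); // quantized bias is S32
        dst.allocator()->init(make_info(TensorShape(4U, 4U, 1U), DataType::QASYMM8));

        CLDeconvolutionLayer deconv;
        deconv.configure(&src, &weights, &bias, &dst, PadStrideInfo(1, 1, 1, 1),
                         0 /* inner_border_right */, 0 /* inner_border_top */);

        src.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();

        // ... map the tensors and fill src/weights/bias here ...

        deconv.run();
        CLScheduler::get().sync();
        return 0;
    }
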
Change-Id: I13ec79b6668e2b9559d3fa789ae0b51ab6975289
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/139126
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
diff --git a/tests/validation/fixtures/DeconvolutionLayerFixture.h b/tests/validation/fixtures/DeconvolutionLayerFixture.h
index 12ce9ce..7741557 100644
--- a/tests/validation/fixtures/DeconvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DeconvolutionLayerFixture.h
@@ -43,39 +43,59 @@
 class DeconvolutionLayerFixtureBase : public framework::Fixture
 {
 public:
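+    // For QASYMM8 (uint8_t) data the bias is a 32-bit integer (S32); all other types keep T.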
+    using TBias = typename std::conditional<std::is_same<typename std::decay<T>::type, uint8_t>::value, int32_t, T>::type;
+
     template <typename...>
     void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info,
-               const std::pair<unsigned int, unsigned int> &inner_border, DataType data_type)
+               const std::pair<unsigned int, unsigned int> &inner_border, DataType data_type, QuantizationInfo quantization_info)
     {
         _data_type = data_type;
 
-        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type);
-        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type);
+        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, quantization_info);
+        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, quantization_info);
     }
 
 protected:
     template <typename U>
     void fill(U &&tensor, int i)
     {
-        if(is_data_type_float(tensor.data_type()))
+        switch(tensor.data_type())
         {
-            std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
-            library->fill(tensor, distribution, i);
-        }
-        else
-        {
-            library->fill_tensor_uniform(tensor, i);
+            case DataType::QASYMM8:
+            {
+                // Use a 32-bit distribution: std::uniform_int_distribution is undefined for 8-bit types.
+                std::uniform_int_distribution<uint32_t> distribution(0, 255);
+                library->fill(tensor, distribution, i);
+                break;
+            }
+            case DataType::S32:
+            {
+                std::uniform_int_distribution<int32_t> distribution(-100, 100);
+                library->fill(tensor, distribution, i);
+                break;
+            }
+            case DataType::F16:
+            case DataType::F32:
+            {
+                std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+                library->fill(tensor, distribution, i);
+                break;
+            }
+            default:
+                library->fill_tensor_uniform(tensor, i);
         }
     }
 
     TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape,
-                              const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> &inner_border, DataType data_type)
+                              const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> &inner_border, DataType data_type, QuantizationInfo quantization_info)
     {
         // Create tensors
-        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1);
-        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1);
-        TensorType bias    = create_tensor<TensorType>(bias_shape, data_type, 1);
-        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1);
+        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info);
+        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, quantization_info);
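+        // Quantized (QASYMM8) runs take an S32 bias, matching the TBias alias above.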
+        TensorType bias    = create_tensor<TensorType>(bias_shape, is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type, 1, quantization_info);
+        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info);
 
         // Create and configure function
         FunctionType conv;
@@ -102,19 +122,20 @@
         fill(AccessorType(weights), 1);
         fill(AccessorType(bias), 2);
 
-        // Compute NEConvolutionLayer function
+        // Compute DeconvolutionLayer function
         conv.run();
 
         return dst;
     }
 
     SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape,
-                                      const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> inner_border, DataType data_type)
+                                      const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> inner_border, DataType data_type, QuantizationInfo quantization_info)
     {
         // Create reference
-        SimpleTensor<T> src{ input_shape, data_type, 1 };
-        SimpleTensor<T> weights{ weights_shape, data_type, 1 };
-        SimpleTensor<T> bias{ bias_shape, data_type, 1 };
+        SimpleTensor<T>     src{ input_shape, data_type, 1, quantization_info };
+        SimpleTensor<T>     weights{ weights_shape, data_type, 1, quantization_info };
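+        // Match the target path: quantized runs use an S32 bias.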
+        SimpleTensor<TBias> bias{ bias_shape, is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type, 1, quantization_info };
 
         // Fill reference
         fill(src, 0);
@@ -144,7 +165,26 @@
         const std::pair<unsigned int, unsigned int> inner_border(inner_border_right, inner_border_top);
         auto        out_dim      = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, padx, pady, inner_border.first, inner_border.second, sx, sy);
         TensorShape output_shape = deconvolution_output_shape(out_dim, input_shape, weights_shape);
-        DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type);
+        DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, QuantizationInfo());
+    }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, unsigned int kernel_size_x, unsigned int kernel_size_y>
+class DeconvolutionValidationQuantizedFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady,
+               unsigned int inner_border_right, unsigned int inner_border_top, unsigned int num_kernels, DataType data_type, QuantizationInfo quantization_info)
+    {
+        ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported");
+        const TensorShape   weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
+        const TensorShape   bias_shape(num_kernels);
+        const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL);
+        const std::pair<unsigned int, unsigned int> inner_border(inner_border_right, inner_border_top);
+        auto        out_dim      = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, padx, pady, inner_border.first, inner_border.second, sx, sy);
+        TensorShape output_shape = deconvolution_output_shape(out_dim, input_shape, weights_shape);
+        DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, quantization_info);
     }
 };
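
For reference, the TBias alias above maps a QASYMM8 element type (uint8_t) to
a 32-bit integer bias, mirroring the S32 bias tensor the target path creates,
and leaves every other type unchanged. A standalone sketch of the trait:

    #include <cstdint>
    #include <type_traits>

    // Same trait as in the fixture: QASYMM8 (uint8_t) data uses an S32 (int32_t) bias.
    template <typename T>
    using TBias = typename std::conditional<std::is_same<typename std::decay<T>::type, uint8_t>::value,
                                            int32_t, T>::type;

    static_assert(std::is_same<TBias<uint8_t>, int32_t>::value, "QASYMM8 takes an S32 bias");
    static_assert(std::is_same<TBias<float>, float>::value, "F32 keeps an F32 bias");
    static_assert(std::is_same<TBias<const uint8_t &>, int32_t>::value, "decay strips cv/ref first");

A CL test suite would then typically instantiate the new fixture along these
lines (the alias name is illustrative):

    template <typename T>
    using CLDeconvolutionLayerQuantizedFixture4x4 =
        DeconvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLDeconvolutionLayer, T, 4, 4>;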