COMPMID-3046: Add CLRequantizationLayerKernel

Change-Id: I034f5aa023642f2323372495ddd14fc62b4c12e0
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2681
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
diff --git a/tests/validation/CL/QuantizationLayer.cpp b/tests/validation/CL/QuantizationLayer.cpp
index e9544fd..e3f47f9 100644
--- a/tests/validation/CL/QuantizationLayer.cpp
+++ b/tests/validation/CL/QuantizationLayer.cpp
@@ -42,9 +42,12 @@
 {
 namespace
 {
-constexpr AbsoluteTolerance<float> tolerance_f32(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */
-const auto                         QuantizationSmallShapes = concat(datasets::Small3DShapes(), datasets::Small4DShapes());
-const auto                         QuantizationLargeShapes = concat(datasets::Large3DShapes(), datasets::Large4DShapes());
+constexpr AbsoluteTolerance<float>    tolerance_f32(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */
+constexpr AbsoluteTolerance<uint8_t>  tolerance_u8(1);     /**< Tolerance value for comparing reference's output against implementation's output for QASYMM8 data types */
+constexpr AbsoluteTolerance<int8_t>   tolerance_s8(1);     /**< Tolerance value for comparing reference's output against implementation's output for QASYMM8_SIGNED data types */
+constexpr AbsoluteTolerance<uint16_t> tolerance_u16(1);    /**< Tolerance value for comparing reference's output against implementation's output for QASYMM16 data types */
+const auto                            QuantizationSmallShapes = concat(datasets::Small3DShapes(), datasets::Small4DShapes());
+const auto                            QuantizationLargeShapes = concat(datasets::Large3DShapes(), datasets::Large4DShapes());
 } // namespace
 
 TEST_SUITE(CL)
@@ -53,7 +56,7 @@
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
-               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::QASYMM8),  // Wrong input data type
+               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::QASYMM8),  // Wrong output data type
                                                        TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32), // Wrong output data type
                                                        TensorInfo(TensorShape(16U, 16U, 2U, 5U), 1, DataType::F32),   // Mismatching shapes
                                                        TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32), // Valid
@@ -125,7 +128,7 @@
                        framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
 {
     // Validate output
-    validate(CLAccessor(_target), _reference, tolerance_f32);
+    validate(CLAccessor(_target), _reference, tolerance_u8);
 }
 FIXTURE_DATA_TEST_CASE(RunLargeQASYMM8, CLQuantizationLayerQASYMM8Fixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(QuantizationLargeShapes,
                        framework::dataset::make("DataTypeIn", DataType::F32)),
@@ -141,7 +144,7 @@
                        framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
 {
     // Validate output
-    validate(CLAccessor(_target), _reference, tolerance_f32);
+    validate(CLAccessor(_target), _reference, tolerance_u8);
 }
 TEST_SUITE_END() // FP32
 
@@ -165,6 +168,64 @@
 TEST_SUITE_END() // FP16
 TEST_SUITE_END() // Float
 
+TEST_SUITE(Quantized)
+template <typename T>
+using CLQuantizationLayerQASYMM8GenFixture = QuantizationValidationGenericFixture<CLTensor, CLAccessor, CLQuantizationLayer, T, uint8_t>;
+template <typename T>
+using CLQuantizationLayerQASYMM8_SIGNEDGenFixture = QuantizationValidationGenericFixture<CLTensor, CLAccessor, CLQuantizationLayer, T, int8_t>;
+template <typename T>
+using CLQuantizationLayerQASYMM16GenFixture = QuantizationValidationGenericFixture<CLTensor, CLAccessor, CLQuantizationLayer, T, uint16_t>;
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8, CLQuantizationLayerQASYMM8GenFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(QuantizationSmallShapes,
+                       framework::dataset::make("DataTypeIn", DataType::QASYMM8)),
+                       framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })),
+                       framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(0.5f, 10) })),
+                       framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(2.0f, 15) })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_u8);
+}
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8_SIGNED, CLQuantizationLayerQASYMM8_SIGNEDGenFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(QuantizationSmallShapes,
+                       framework::dataset::make("DataTypeIn", DataType::QASYMM8)),
+                       framework::dataset::make("DataTypeOut", { DataType::QASYMM8_SIGNED })),
+                       framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(1.0f, 10) })),
+                       framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(1.0f, 15) })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_s8);
+}
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM16, CLQuantizationLayerQASYMM16GenFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(QuantizationSmallShapes,
+                       framework::dataset::make("DataTypeIn", DataType::QASYMM8)),
+                       framework::dataset::make("DataTypeOut", { DataType::QASYMM16 })),
+                       framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(1.0f, 10) })),
+                       framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(4.0f, 23) })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_u16);
+}
+TEST_SUITE_END() // QASYMM8
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8_SIGNED, CLQuantizationLayerQASYMM8_SIGNEDGenFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(QuantizationSmallShapes,
+                       framework::dataset::make("DataTypeIn", DataType::QASYMM8_SIGNED)),
+                       framework::dataset::make("DataTypeOut", { DataType::QASYMM8_SIGNED })),
+                       framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(1.0f, 10) })),
+                       framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(2.0f, 5) })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_s8);
+}
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8, CLQuantizationLayerQASYMM8GenFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(QuantizationSmallShapes,
+                       framework::dataset::make("DataTypeIn", DataType::QASYMM8_SIGNED)),
+                       framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })),
+                       framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(2.0f, 10) })),
+                       framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(1.0f, 30) })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_u8);
+}
+TEST_SUITE_END() // QASYMM8_SIGNED
+TEST_SUITE_END() // Quantized
+
 TEST_SUITE_END() // QuantizationLayer
 TEST_SUITE_END() // CL
 } // namespace validation
diff --git a/tests/validation/fixtures/QuantizationLayerFixture.h b/tests/validation/fixtures/QuantizationLayerFixture.h
index 4ffc659..085abef 100644
--- a/tests/validation/fixtures/QuantizationLayerFixture.h
+++ b/tests/validation/fixtures/QuantizationLayerFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -43,14 +43,14 @@
 namespace validation
 {
 template <typename TensorType, typename AccessorType, typename FunctionType, typename Tin, typename Tout>
-class QuantizationValidationFixture : public framework::Fixture
+class QuantizationValidationGenericFixture : public framework::Fixture
 {
 public:
     template <typename...>
-    void setup(TensorShape shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo)
+    void setup(TensorShape shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo, QuantizationInfo qinfo_in)
     {
-        _target    = compute_target(shape, data_type_in, data_type_out, qinfo);
-        _reference = compute_reference(shape, data_type_in, data_type_out, qinfo);
+        _target    = compute_target(shape, data_type_in, data_type_out, qinfo, qinfo_in);
+        _reference = compute_reference(shape, data_type_in, data_type_out, qinfo, qinfo_in);
     }
 
 protected:
@@ -60,10 +60,10 @@
         library->fill_tensor_uniform(tensor, 0);
     }
 
-    TensorType compute_target(const TensorShape &shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo)
+    TensorType compute_target(const TensorShape &shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo, QuantizationInfo qinfo_in)
     {
         // Create tensors
-        TensorType src = create_tensor<TensorType>(shape, data_type_in);
+        TensorType src = create_tensor<TensorType>(shape, data_type_in, 1, qinfo_in);
         TensorType dst = create_tensor<TensorType>(shape, data_type_out, 1, qinfo);
 
         // Create and configure function
@@ -89,10 +89,10 @@
         return dst;
     }
 
-    SimpleTensor<Tout> compute_reference(const TensorShape &shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo)
+    SimpleTensor<Tout> compute_reference(const TensorShape &shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo, QuantizationInfo qinfo_in)
     {
         // Create reference
-        SimpleTensor<Tin> src{ shape, data_type_in };
+        SimpleTensor<Tin> src{ shape, data_type_in, 1, qinfo_in };
 
         // Fill reference
         fill(src);
@@ -104,6 +104,17 @@
     SimpleTensor<Tout> _reference{};
 };
 
+template <typename TensorType, typename AccessorType, typename FunctionType, typename Tin, typename Tout>
+class QuantizationValidationFixture : public QuantizationValidationGenericFixture<TensorType, AccessorType, FunctionType, Tin, Tout>
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo)
+    {
+        QuantizationValidationGenericFixture<TensorType, AccessorType, FunctionType, Tin, Tout>::setup(shape, data_type_in, data_type_out, qinfo, QuantizationInfo());
+    }
+};
+
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/validation/reference/QuantizationLayer.cpp b/tests/validation/reference/QuantizationLayer.cpp
index 8ba3744..cfc5085 100644
--- a/tests/validation/reference/QuantizationLayer.cpp
+++ b/tests/validation/reference/QuantizationLayer.cpp
@@ -77,6 +77,41 @@
     return dst;
 }
 
+template <>
+SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<uint8_t> &src, DataType output_data_type, const QuantizationInfo &quantization_info)
+{
+    SimpleTensor<float> src_tmp = convert_from_asymmetric<uint8_t>(src);
+    return quantization_layer<float, uint8_t>(src_tmp, output_data_type, quantization_info);
+}
+
+template <>
+SimpleTensor<int8_t> quantization_layer(const SimpleTensor<uint8_t> &src, DataType output_data_type, const QuantizationInfo &quantization_info)
+{
+    SimpleTensor<float> src_tmp = convert_from_asymmetric<uint8_t>(src);
+    return quantization_layer<float, int8_t>(src_tmp, output_data_type, quantization_info);
+}
+
+template <>
+SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<int8_t> &src, DataType output_data_type, const QuantizationInfo &quantization_info)
+{
+    SimpleTensor<float> src_tmp = convert_from_asymmetric<int8_t>(src);
+    return quantization_layer<float, uint8_t>(src_tmp, output_data_type, quantization_info);
+}
+
+template <>
+SimpleTensor<int8_t> quantization_layer(const SimpleTensor<int8_t> &src, DataType output_data_type, const QuantizationInfo &quantization_info)
+{
+    SimpleTensor<float> src_tmp = convert_from_asymmetric<int8_t>(src);
+    return quantization_layer<float, int8_t>(src_tmp, output_data_type, quantization_info);
+}
+
+template <>
+SimpleTensor<uint16_t> quantization_layer(const SimpleTensor<uint8_t> &src, DataType output_data_type, const QuantizationInfo &quantization_info)
+{
+    SimpleTensor<float> src_tmp = convert_from_asymmetric<uint8_t>(src);
+    return quantization_layer<float, uint16_t>(src_tmp, output_data_type, quantization_info);
+}
+
 template SimpleTensor<int8_t> quantization_layer(const SimpleTensor<half> &src, DataType output_data_type, const QuantizationInfo &quantization_info);
 template SimpleTensor<int8_t> quantization_layer(const SimpleTensor<float> &src, DataType output_data_type, const QuantizationInfo &quantization_info);
 template SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<half> &src, DataType output_data_type, const QuantizationInfo &quantization_info);