COMPMID-2764: Add support for QASYMM8_SIGNED in NEConvolutionLayer.

Change-Id: I8fbbd2e399f48968337a60147098d04f27c2d1c0
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2402
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
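
A minimal configuration sketch of what this change enables (not part of the patch; shapes,
scales and offsets are illustrative). Note that bias tensors stay S32 for quantized types,
matching the TBias change in the convolution fixture below:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        // Illustrative NCHW shapes as (W, H, C[, N]): a 32x32x3 input and eight 3x3 filters.
        Tensor src, weights, biases, dst;
        src.allocator()->init(TensorInfo(TensorShape(32U, 32U, 3U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.5f, 10)));
        weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 8U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.25f, 0)));
        biases.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::S32));
        dst.allocator()->init(TensorInfo(TensorShape(32U, 32U, 8U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.5f, 10)));

        // Stride 1 with padding 1 keeps the 32x32 spatial size.
        NEConvolutionLayer conv;
        conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1));

        src.allocator()->allocate();
        weights.allocator()->allocate();
        biases.allocator()->allocate();
        dst.allocator()->allocate();

        // Tensors would be filled with quantized data here before running.
        conv.run();
        return 0;
    }
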
diff --git a/tests/validation/Helpers.cpp b/tests/validation/Helpers.cpp
index fef4510..afefee7 100644
--- a/tests/validation/Helpers.cpp
+++ b/tests/validation/Helpers.cpp
@@ -122,6 +122,19 @@
 }
 
 template <>
+SimpleTensor<float> convert_from_asymmetric(const SimpleTensor<int8_t> &src)
+{
+    const UniformQuantizationInfo &quantization_info = src.quantization_info().uniform();
+    SimpleTensor<float>            dst{ src.shape(), DataType::F32, 1, QuantizationInfo(), src.data_layout() };
+
+    for(int i = 0; i < src.num_elements(); ++i)
+    {
+        dst[i] = dequantize_qasymm8_signed(src[i], quantization_info);
+    }
+    return dst;
+}
+
+template <>
 SimpleTensor<float> convert_from_asymmetric(const SimpleTensor<uint16_t> &src)
 {
     const UniformQuantizationInfo &quantization_info = src.quantization_info().uniform();
@@ -161,19 +174,6 @@
 }
 
 template <>
-SimpleTensor<float> convert_from_asymmetric(const SimpleTensor<int8_t> &src)
-{
-    const UniformQuantizationInfo &quantization_info = src.quantization_info().uniform();
-    SimpleTensor<float>            dst{ src.shape(), DataType::F32, 1, QuantizationInfo(), src.data_layout() };
-
-    for(int i = 0; i < src.num_elements(); ++i)
-    {
-        dst[i] = dequantize_qasymm8_signed(src[i], quantization_info);
-    }
-    return dst;
-}
-
-template <>
 SimpleTensor<uint16_t> convert_to_asymmetric(const SimpleTensor<float> &src, const QuantizationInfo &quantization_info)
 {
     SimpleTensor<uint16_t>         dst{ src.shape(), DataType::QASYMM16, 1, quantization_info };
@@ -354,6 +354,15 @@
     return std::pair<int, int> { min_bound, max_bound };
 }
 
+std::pair<int, int> get_quantized_qasymm8_signed_bounds(const QuantizationInfo &quant_info, float min, float max)
+{
+    ARM_COMPUTE_ERROR_ON_MSG(min > max, "min must be lower than or equal to max");
+
+    const int min_bound = quantize_qasymm8_signed(min, quant_info.uniform());
+    const int max_bound = quantize_qasymm8_signed(max, quant_info.uniform());
+    return std::pair<int, int> { min_bound, max_bound };
+}
+
 std::pair<int, int> get_symm_quantized_per_channel_bounds(const QuantizationInfo &quant_info, float min, float max, size_t channel_id)
 {
     ARM_COMPUTE_ERROR_ON_MSG(min > max, "min must be lower equal than max");
diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
index 3227a98..b481b52 100644
--- a/tests/validation/Helpers.h
+++ b/tests/validation/Helpers.h
@@ -262,6 +262,14 @@
  */
 std::pair<int, int> get_quantized_bounds(const QuantizationInfo &quant_info, float min, float max);
 
+/** Helper function to compute asymmetric quantized signed min and max bounds
+ *
+ * @param[in] quant_info Quantization info to be used for conversion
+ * @param[in] min        Floating point minimum value to be quantized
+ * @param[in] max        Floating point maximum value to be quantized
+ */
+std::pair<int, int> get_quantized_qasymm8_signed_bounds(const QuantizationInfo &quant_info, float min, float max);
+
 /** Helper function to compute symmetric quantized min and max bounds
  *
  * @param[in] quant_info Quantization info to be used for conversion
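
For reference, the new helper assumes the standard QASYMM8_SIGNED mapping used elsewhere in
the library: q = clamp(round(f / scale) + offset, -128, 127) when quantizing and
f = scale * (q - offset) when dequantizing. A hedged sketch of how a fixture might use it to
pick a fill range (quantization values are illustrative):

    // With scale 1/128 and zero offset, the float interval [-1.0f, 1.0f]
    // covers the full signed range: bounds.first == -128 and bounds.second == 127
    // (the upper value 128 is clamped to 127).
    const QuantizationInfo qinfo(1.f / 128.f, 0);
    const std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(qinfo, -1.0f, 1.0f);
    std::uniform_int_distribution<int32_t> distribution(bounds.first, bounds.second);
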
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index c2a0cb5..1d7805d 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -462,6 +462,19 @@
 }
 TEST_SUITE_END() // QASYMM8
 
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
+                       framework::dataset::make("ReshapeWeights", { true })),
+                       framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+                       QuantizedActivationFunctionsDataset))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8_SIGNED
+
 TEST_SUITE(QSYMM8_PER_CHANNEL)
 FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedPerChannelFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
                        combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerReducedDataset(),
diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index c5cddc2..3c4b625 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -52,7 +52,9 @@
 class ConvolutionValidationGenericFixture : public framework::Fixture
 {
 public:
-    using TBias = typename std::conditional<std::is_same<typename std::decay<T>::type, uint8_t>::value, int32_t, T>::type;
+    using TBias = typename std::conditional < std::is_same<typename std::decay<T>::type, uint8_t>::value
+                  || std::is_same<typename std::decay<T>::type, int8_t>::value,
+                  int32_t, T >::type;
 
 public:
     template <typename...>
@@ -84,6 +86,13 @@
                 library->fill(tensor, distribution, i);
                 break;
             }
+            case DataType::QASYMM8_SIGNED:
+            {
+                std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f);
+                std::uniform_int_distribution<int8_t> distribution(bounds.first, bounds.second);
+                library->fill(tensor, distribution, i);
+                break;
+            }
             case DataType::QSYMM8_PER_CHANNEL:
             {
                 int min_bound = 128;
diff --git a/tests/validation/reference/ActivationLayer.cpp b/tests/validation/reference/ActivationLayer.cpp
index 6cdba09..7a699c5 100644
--- a/tests/validation/reference/ActivationLayer.cpp
+++ b/tests/validation/reference/ActivationLayer.cpp
@@ -66,6 +66,17 @@
 }
 
 template <>
+SimpleTensor<int8_t> activation_layer<int8_t>(const SimpleTensor<int8_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
+{
+    const QuantizationInfo dst_qinfo = oq_info.empty() ? src.quantization_info() : oq_info;
+
+    SimpleTensor<float>  src_tmp = convert_from_asymmetric(src);
+    SimpleTensor<float>  dst_tmp = activation_layer<float>(src_tmp, info);
+    SimpleTensor<int8_t> dst     = convert_to_asymmetric<int8_t>(dst_tmp, dst_qinfo);
+    return dst;
+}
+
+template <>
 SimpleTensor<int16_t> activation_layer<int16_t>(const SimpleTensor<int16_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
 {
     const QuantizationInfo dst_qinfo = oq_info.empty() ? src.quantization_info() : oq_info;
diff --git a/tests/validation/reference/Convolution3d.h b/tests/validation/reference/Convolution3d.h
index 6ac5df9..6168f10 100644
--- a/tests/validation/reference/Convolution3d.h
+++ b/tests/validation/reference/Convolution3d.h
@@ -24,6 +24,7 @@
 #ifndef ARM_COMPUTE_TEST_VALIDATION_CONVOLUTION_H
 #define ARM_COMPUTE_TEST_VALIDATION_CONVOLUTION_H
 
+#include "arm_compute/core/utils/misc/Requires.h"
 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
 #include "tests/validation/Helpers.h"
 #include "tests/validation/reference/UtilsQuantizedAsymm.h"
@@ -94,10 +95,8 @@
 }
 
 // 3D convolution for QASYMM8 type
-template < typename T, typename TW, typename TB, typename std::enable_if < std::is_same<T, uint8_t>::value &&(std::is_same<TW, uint8_t>::value
-                                                                                                              || std::is_same<TW, int8_t>::value)
-                                                                           &&std::is_same<TB, int32_t>::value,
-                                                                           int >::type = 0 >
+template < typename T, typename TW, typename TB, REQUIRES_TA((std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value) &&(std::is_same<TW, uint8_t>::value
+                                                             || std::is_same<TW, int8_t>::value)) >
 inline void convolution3d(const SimpleTensor<T> &in, const SimpleTensor<TW> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &out,
                           int i_offset, int w_offset, int b_offset, int o_offset,
                           int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights, int dilation_x = 1, int dilation_y = 1, int filter_id = 0)
@@ -172,7 +171,8 @@
     acc += (*b_ptr);
 
     // Quantize down
-    acc = validation::quantize_down_scale_by_fixedpoint(acc, output_multiplier, output_shift, output_offset, 0, 255);
+    acc = validation::quantize_down_scale_by_fixedpoint(acc, output_multiplier, output_shift, output_offset,
+                                                        std::numeric_limits<T>::lowest(), std::numeric_limits<T>::max());
 
     // Store the result
     *out_ptr = acc;
diff --git a/tests/validation/reference/ConvolutionLayer.cpp b/tests/validation/reference/ConvolutionLayer.cpp
index 4d2c1ac..c9ad8d3 100644
--- a/tests/validation/reference/ConvolutionLayer.cpp
+++ b/tests/validation/reference/ConvolutionLayer.cpp
@@ -41,10 +41,6 @@
 {
 namespace reference
 {
-namespace
-{
-} // namespace
-
 template <typename T, typename TW, typename TB>
 SimpleTensor<T> convolution_layer_nchw(const SimpleTensor<T> &src, const SimpleTensor<TW> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &dst, const PadStrideInfo &info,
                                        const Size2D &dilation, unsigned int num_groups)
@@ -141,6 +137,8 @@
                                                  const PadStrideInfo &info, const Size2D &dilation, unsigned int num_groups, QuantizationInfo out_quant_info);
 template SimpleTensor<uint8_t> convolution_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<int8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &output_shape,
                                                  const PadStrideInfo &info, const Size2D &dilation, unsigned int num_groups, QuantizationInfo out_quant_info);
+template SimpleTensor<int8_t> convolution_layer(const SimpleTensor<int8_t> &src, const SimpleTensor<int8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &output_shape,
+                                                const PadStrideInfo &info, const Size2D &dilation, unsigned int num_groups, QuantizationInfo out_quant_info);
 } // namespace reference
 } // namespace validation
 } // namespace test