COMPMID-2792: Add support for QASYMM8_SIGNED in CLGEMMLowpMatrixMultiplyReshapedKernel

Signed-off-by: Sheri Zhang <sheri.zhang@arm.com>
Change-Id: I005e604253394f31173f37ec0296caf76b5e697c
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/c/VisualCompute/ComputeLibrary/+/227008
Tested-by: bsgcomp <bsgcomp@arm.com>
Reviewed-by: Sang-Hoon Park <sang-hoon.park@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2798
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
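
For context, the change below threads a new DataType parameter through the two GEMMLowp reshaped-GEMM validation fixtures so that each test can run for both QASYMM8 and QASYMM8_SIGNED. A minimal sketch of how a caller might exercise the updated fixture follows; the fixture/kernel type names are assumed from the surrounding test suite and the shape/block values are illustrative, not part of this patch:

    // Hedged sketch, not part of the patch: direct use of the updated fixture.
    // Template arguments are assumed from the CL validation suite's conventions.
    GEMMLowpMatrixMultiplyReshapedValidationFixture<CLTensor, CLAccessor,
                                                    CLGEMMReshapeLHSMatrixKernel,
                                                    CLGEMMReshapeRHSMatrixKernel,
                                                    CLGEMMLowpMatrixMultiplyReshapedKernel>
    fixture;
    // m, n, k, batch_size, m0, n0, k0, v0, h0, interleave_lhs, interleave_rhs, data_type
    fixture.setup(37, 51, 23, 2, 4, 4, 16, 2, 2, true, false, DataType::QASYMM8_SIGNED);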
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index 1154d6c..d13ada9 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -656,7 +656,7 @@
 public:
     template <typename...>
     void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0, bool interleave_lhs,
-               bool interleave_rhs)
+               bool interleave_rhs, DataType data_type)
     {
         GEMMLHSMatrixInfo lhs_info;
         lhs_info.m0         = m0;
@@ -676,24 +676,39 @@
         const TensorShape lhs_shape(k, m, batch_size);
         const TensorShape rhs_shape(n, k, batch_size);
 
-        _target    = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info);
-        _reference = compute_reference(lhs_shape, rhs_shape);
+        _target    = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, data_type);
+        _reference = compute_reference(lhs_shape, rhs_shape, data_type);
     }
 
 protected:
     template <typename U>
     void fill(U &&tensor, int i)
     {
-        // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
-        std::uniform_int_distribution<> distribution(1, 254);
-        library->fill(tensor, distribution, i);
+        switch(tensor.data_type())
+        {
+            case DataType::QASYMM8:
+            {
+                // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
+                std::uniform_int_distribution<> distribution(1, 254);
+                library->fill(tensor, distribution, i);
+            }
+            break;
+            case DataType::QASYMM8_SIGNED:
+            {
+                // Between -127 and 126 to avoid the int8 extremes -128 and 127, mirroring the unsigned case above
+                std::uniform_int_distribution<> distribution(-127, 126);
+                library->fill(tensor, distribution, i);
+            }
+            break;
+            default:
+                ARM_COMPUTE_ERROR("Unsupported data type");
+        }
     }
 
-    TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info)
+    TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, DataType data_type)
     {
         // Create tensors
-        TensorType lhs = create_tensor<TensorType>(lhs_shape, DataType::QASYMM8, 1);
-        TensorType rhs = create_tensor<TensorType>(rhs_shape, DataType::QASYMM8, 1);
+        TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
+        TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1);
         TensorType lhs_reshaped;
         TensorType rhs_reshaped;
         TensorType dst;
@@ -740,21 +755,41 @@
         return dst;
     }
 
-    SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape)
+    SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type)
     {
         TensorShape dst_shape = lhs_shape;
         dst_shape[0]          = rhs_shape[0];
         dst_shape[1]          = lhs_shape[1];
 
-        // Create reference
-        SimpleTensor<uint8_t> lhs{ lhs_shape, DataType::QASYMM8, 1 };
-        SimpleTensor<uint8_t> rhs{ rhs_shape, DataType::QASYMM8, 1 };
+        switch(data_type)
+        {
+            case DataType::QASYMM8:
+            {
+                // Create reference
+                SimpleTensor<uint8_t> lhs{ lhs_shape, data_type, 1 };
+                SimpleTensor<uint8_t> rhs{ rhs_shape, data_type, 1 };
 
-        // Fill reference
-        fill(lhs, 0);
-        fill(rhs, 1);
+                // Fill reference
+                fill(lhs, 0);
+                fill(rhs, 1);
 
-        return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
+                return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
+            }
+            case DataType::QASYMM8_SIGNED:
+            {
+                // Create reference
+                SimpleTensor<int8_t> lhs{ lhs_shape, data_type, 1 };
+                SimpleTensor<int8_t> rhs{ rhs_shape, data_type, 1 };
+
+                // Fill reference
+                fill(lhs, 0);
+                fill(rhs, 1);
+
+                return reference::gemmlowp_matrix_multiply_core<int32_t, int8_t>(lhs, rhs, dst_shape, 0, 0);
+            }
+            default:
+                ARM_COMPUTE_ERROR("Unsupported data type");
+        }
     }
 
     TensorType            _target{};
@@ -767,7 +802,7 @@
 public:
     template <typename...>
     void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0,
-               bool interleave_lhs, bool interleave_rhs)
+               bool interleave_lhs, bool interleave_rhs, DataType data_type)
     {
         GEMMLHSMatrixInfo lhs_info;
         lhs_info.m0         = m0;
@@ -790,24 +825,40 @@
         const TensorShape lhs_shape(k, m, batch_size);
         const TensorShape rhs_shape(n, k, batch_size);
 
-        _target    = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, m_h);
-        _reference = compute_reference(lhs_shape, rhs_shape, m_h);
+        _target    = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, m_h, data_type);
+        _reference = compute_reference(lhs_shape, rhs_shape, m_h, data_type);
     }
 
 protected:
     template <typename U>
     void fill(U &&tensor, int i)
     {
-        // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
-        std::uniform_int_distribution<> distribution(1, 254);
-        library->fill(tensor, distribution, i);
+        switch(tensor.data_type())
+        {
+            case DataType::QASYMM8:
+            {
+                // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
+                std::uniform_int_distribution<> distribution(1, 254);
+                library->fill(tensor, distribution, i);
+            }
+            break;
+            case DataType::QASYMM8_SIGNED:
+            {
+                // Between -127 and 126 to avoid the int8 extremes -128 and 127, mirroring the unsigned case above
+                std::uniform_int_distribution<> distribution(-127, 126);
+                library->fill(tensor, distribution, i);
+            }
+            break;
+            default:
+                ARM_COMPUTE_ERROR("Unsupported data type");
+        }
     }
 
-    TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, unsigned int m_h)
+    TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, unsigned int m_h,
+                              DataType data_type)
     {
         // Create tensors
-        TensorType lhs = create_tensor<TensorType>(lhs_shape, DataType::QASYMM8, 1);
-        TensorType rhs = create_tensor<TensorType>(rhs_shape, DataType::QASYMM8, 1);
+        TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
+        TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1);
         TensorType lhs_reshaped;
         TensorType rhs_reshaped;
         TensorType dst;
@@ -854,7 +905,7 @@
         return dst;
     }
 
-    SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, unsigned int m_h)
+    SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, unsigned int m_h, DataType data_type)
     {
         TensorShape dst_shape = lhs_shape;
         dst_shape.set(0, rhs_shape[0]);
@@ -862,15 +913,35 @@
         dst_shape.set(2, m_h);
         dst_shape.set(3, lhs_shape[2]);
 
-        // Create reference
-        SimpleTensor<uint8_t> lhs{ lhs_shape, DataType::QASYMM8, 1 };
-        SimpleTensor<uint8_t> rhs{ rhs_shape, DataType::QASYMM8, 1 };
+        switch(data_type)
+        {
+            case DataType::QASYMM8:
+            {
+                // Create reference
+                SimpleTensor<uint8_t> lhs{ lhs_shape, data_type, 1 };
+                SimpleTensor<uint8_t> rhs{ rhs_shape, data_type, 1 };
 
-        // Fill reference
-        fill(lhs, 0);
-        fill(rhs, 1);
+                // Fill reference
+                fill(lhs, 0);
+                fill(rhs, 1);
 
-        return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
+                return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
+            }
+            case DataType::QASYMM8_SIGNED:
+            {
+                // Create reference
+                SimpleTensor<int8_t> lhs{ lhs_shape, data_type, 1 };
+                SimpleTensor<int8_t> rhs{ rhs_shape, data_type, 1 };
+
+                // Fill reference
+                fill(lhs, 0);
+                fill(rhs, 1);
+
+                return reference::gemmlowp_matrix_multiply_core<int32_t, int8_t>(lhs, rhs, dst_shape, 0, 0);
+            }
+            default:
+                ARM_COMPUTE_ERROR("Unsupported data type");
+        }
     }
 
     TensorType            _target{};
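
In both fixtures the reference path calls reference::gemmlowp_matrix_multiply_core with zero quantization offsets (the trailing `0, 0` arguments), so the expected output is a plain integer GEMM accumulated in int32_t regardless of whether the inputs are uint8_t or int8_t. A minimal standalone sketch of that computation for the signed case (illustrative, not the library's implementation):

    #include <cstdint>
    #include <vector>

    // Plain int32 GEMM over row-major int8 inputs: what the reference computes
    // for QASYMM8_SIGNED when both quantization offsets are zero.
    std::vector<int32_t> gemmlowp_i8(const std::vector<int8_t> &lhs, // M x K
                                     const std::vector<int8_t> &rhs, // K x N
                                     int M, int N, int K)
    {
        std::vector<int32_t> dst(static_cast<size_t>(M) * N, 0);
        for(int m = 0; m < M; ++m)
        {
            for(int n = 0; n < N; ++n)
            {
                int32_t acc = 0;
                for(int k = 0; k < K; ++k)
                {
                    // Widen to int32 before multiplying so products cannot overflow
                    acc += static_cast<int32_t>(lhs[m * K + k]) * static_cast<int32_t>(rhs[k * N + n]);
                }
                dst[m * N + n] = acc;
            }
        }
        return dst;
    }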