COMPMID-675 - Reworked NEGEMMLowp interface/function

The new interface allows NEGEMMLowp to work with the QASYMM8 (asymmetric 8-bit quantized) data type.

Implemented two new functions:
- NEGEMMLowpMatrixMultiplyCore
- NEGEMMLowpOutputStage

These functions should make the integration with Android NN feasible.
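
A rough usage sketch of the two functions chained together (tensor shapes,
offsets and the result_* parameters are illustrative; the configure()
signatures follow the validation fixtures below, and
NEGEMMLowpQuantizeDownInt32ToUint8Scale is assumed here to be the concrete
output-stage function):

    #include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
    #include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    // Illustrative dimensions and quantization parameters
    const size_t M = 4, N = 8, K = 16;
    const int32_t a_offset = 1, b_offset = 2;
    const int32_t result_offset = 2, result_mult_int = 3, result_shift = 1;

    Tensor a, b, acc, dst;
    a.allocator()->init(TensorInfo(TensorShape(K, M), 1, DataType::QASYMM8));
    b.allocator()->init(TensorInfo(TensorShape(N, K), 1, DataType::QASYMM8));
    acc.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::S32));
    dst.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::QASYMM8));

    a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));
    b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));

    // QASYMM8 x QASYMM8 -> S32 accumulators
    NEGEMMLowpMatrixMultiplyCore mm;
    mm.configure(&a, &b, &acc);

    // S32 accumulators -> QASYMM8 output
    NEGEMMLowpQuantizeDownInt32ToUint8Scale output_stage;
    output_stage.configure(&acc, &dst, result_offset, result_mult_int, result_shift);

    a.allocator()->allocate();
    b.allocator()->allocate();
    acc.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill a and b ...

    mm.run();
    output_stage.run();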

For more information about GEMMLowp:
https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
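
For reference, the quantize-down output stage exercised by the fixtures below
maps each S32 accumulator to a QASYMM8 value, roughly (my paraphrase of the
reference implementation):

    uint8_out = clamp(((int32_in + result_offset) * result_mult_int) >> result_shift, 0, 255)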

Change-Id: Ie2c775f45234f68ca53dba644b3a912b997fd890
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/95504
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index fba4400..f9b0dbd 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -43,109 +43,36 @@
 namespace validation
 {
 template <typename TensorType, typename AccessorType, typename FunctionType>
-class GEMMLowpOffsetValidationFixture : public framework::Fixture
+class GEMMLowpMatrixMultiplyCoreValidationFixture : public framework::Fixture
 {
 public:
     template <typename...>
-    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift, DataType data_type)
+    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, int32_t a_offset, int32_t b_offset)
     {
-        _target    = compute_target(shape_a, shape_b, shape_c, a_offset, b_offset, c_offset, c_mult_int, out_shift, data_type);
-        _reference = compute_reference(shape_a, shape_b, shape_c, a_offset, b_offset, c_offset, c_mult_int, out_shift, data_type);
+        _target    = compute_target(shape_a, shape_b, shape_c, a_offset, b_offset);
+        _reference = compute_reference(shape_a, shape_b, shape_c, a_offset, b_offset);
     }
 
 protected:
     template <typename U>
     void fill(U &&tensor, int i)
     {
-        ARM_COMPUTE_ERROR_ON(tensor.data_type() != DataType::S8);
-        std::uniform_int_distribution<> distribution(0, 3);
+        // Use values in [1, 254] in order to avoid -128 and 128 on the DOT product path
+        std::uniform_int_distribution<> distribution(1, 254);
         library->fill(tensor, distribution, i);
     }
 
     TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c,
-                              int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift, DataType data_type)
+                              int32_t a_offset, int32_t b_offset)
     {
         // Create tensors
-        TensorType a = create_tensor<TensorType>(shape_a, data_type, 1);
-        TensorType b = create_tensor<TensorType>(shape_b, data_type, 1);
-        TensorType c = create_tensor<TensorType>(shape_c, data_type, 1);
-
-        // Create and configure function
-        FunctionType gemmlowp;
-        gemmlowp.configure(&a, &b, &c, a_offset, b_offset, c_offset, c_mult_int, out_shift);
-
-        ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
-        ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
-        ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
-
-        // Allocate tensors
-        a.allocator()->allocate();
-        b.allocator()->allocate();
-        c.allocator()->allocate();
-
-        ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
-        ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
-        ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
-
-        // Fill tensors
-        fill(AccessorType(a), 0);
-        fill(AccessorType(b), 1);
-        fill(AccessorType(c), 2);
-
-        // Compute GEMM function
-        gemmlowp.run();
-        return c;
-    }
-
-    SimpleTensor<int8_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c,
-                                           int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift, DataType data_type)
-    {
-        // Create reference
-        SimpleTensor<int8_t> a{ shape_a, data_type, 1 };
-        SimpleTensor<int8_t> b{ shape_b, data_type, 1 };
-        SimpleTensor<int8_t> c{ shape_c, data_type, 1 };
-
-        // Fill reference
-        fill(a, 0);
-        fill(b, 1);
-        fill(c, 2);
-
-        return reference::gemmlowp<int8_t>(a, b, c, a_offset, b_offset, c_offset, c_mult_int, out_shift);
-    }
-
-    TensorType           _target{};
-    SimpleTensor<int8_t> _reference{};
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType>
-class GEMMLowpMatrixMultiplyValidationFixture : public framework::Fixture
-{
-public:
-    template <typename...>
-    void setup(size_t m, size_t n, size_t k)
-    {
-        const TensorShape shape_a(k, m);
-        const TensorShape shape_b(n, k);
-        const TensorShape shape_c(n, m);
-        _target    = compute_target(shape_a, shape_b, shape_c);
-        _reference = compute_reference(shape_a, shape_b, shape_c);
-    }
-
-protected:
-    template <typename U>
-    void fill(U &&tensor, int i, int lo, int hi)
-    {
-        std::uniform_int_distribution<> distribution(lo, hi);
-        library->fill(tensor, distribution, i);
-    }
-
-    TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c)
-    {
-        // Create tensors
-        TensorType a = create_tensor<TensorType>(shape_a, DataType::S8, 1);
-        TensorType b = create_tensor<TensorType>(shape_b, DataType::S8, 1);
+        TensorType a = create_tensor<TensorType>(shape_a, DataType::QASYMM8, 1);
+        TensorType b = create_tensor<TensorType>(shape_b, DataType::QASYMM8, 1);
         TensorType c = create_tensor<TensorType>(shape_c, DataType::S32, 1);
 
+        a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));
+        b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));
+
         // Create and configure function
         FunctionType gemmlowp;
         gemmlowp.configure(&a, &b, &c);
@@ -164,34 +91,93 @@
         ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
 
         // Fill tensors
-        fill(AccessorType(a), 0, -128, 127);
-        fill(AccessorType(b), 1, -128, 127);
-        fill(AccessorType(c), 2, 0, 0);
+        fill(AccessorType(a), 0);
+        fill(AccessorType(b), 1);
 
         // Compute GEMM function
         gemmlowp.run();
         return c;
     }
 
-    SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c)
+    SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c,
+                                            int32_t a_offset, int32_t b_offset)
     {
         // Create reference
-        SimpleTensor<int8_t>  a{ shape_a, DataType::S8, 1 };
-        SimpleTensor<int8_t>  b{ shape_b, DataType::S8, 1 };
-        SimpleTensor<int32_t> c{ shape_c, DataType::S32, 1 };
+        SimpleTensor<uint8_t> a{ shape_a, DataType::QASYMM8, 1 };
+        SimpleTensor<uint8_t> b{ shape_b, DataType::QASYMM8, 1 };
 
         // Fill reference
-        fill(a, 0, -128, 127);
-        fill(b, 1, -128, 127);
-        fill(c, 2, 0, 0);
+        fill(a, 0);
+        fill(b, 1);
 
-        return reference::gemmlowp(a, b, c);
+        return reference::gemmlowp_matrix_multiply_core<uint8_t>(a, b, a_offset, b_offset);
     }
 
     TensorType            _target{};
     SimpleTensor<int32_t> _reference{};
 };
 
+template <typename TensorType, typename AccessorType, typename FunctionType>
+class GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture : public framework::Fixture
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift)
+    {
+        _target    = compute_target(shape, result_offset, result_mult_int, result_shift);
+        _reference = compute_reference(shape, result_offset, result_mult_int, result_shift);
+    }
+
+protected:
+    template <typename U>
+    void fill(U &&tensor, int i)
+    {
+        std::uniform_int_distribution<> distribution(-6000, 6000);
+        library->fill(tensor, distribution, i);
+    }
+
+    TensorType compute_target(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift)
+    {
+        // Create tensors
+        TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
+        TensorType b = create_tensor<TensorType>(shape, DataType::QASYMM8, 1);
+
+        // Create and configure function
+        FunctionType output_stage;
+        output_stage.configure(&a, &b, result_offset, result_mult_int, result_shift);
+
+        ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Allocate tensors
+        a.allocator()->allocate();
+        b.allocator()->allocate();
+
+        ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Fill tensors
+        fill(AccessorType(a), 0);
+
+        // Compute output stage function
+        output_stage.run();
+        return b;
+    }
+
+    SimpleTensor<uint8_t> compute_reference(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift)
+    {
+        // Create reference
+        SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
+
+        // Fill reference
+        fill(a, 0);
+
+        return reference::gemmlowp_quantize_down_int32_to_uint8_scale<int32_t>(a, result_offset, result_mult_int, result_shift);
+    }
+
+    TensorType            _target{};
+    SimpleTensor<uint8_t> _reference{};
+};
 } // namespace validation
 } // namespace test
 } // namespace arm_compute