COMPMID-481: Add gemmlowp_aarch64_v8p4 kernel.

Change-Id: I15496b16ffd636f5bff76572e750df7e15c80830
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/90532
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h
index 5839d82..6d50ce7 100644
--- a/arm_compute/core/NEON/NEKernels.h
+++ b/arm_compute/core/NEON/NEKernels.h
@@ -59,6 +59,8 @@
 #include "arm_compute/core/NEON/kernels/NEFloorKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMAssemblyBaseKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMMatrixAccumulateBiasesKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
@@ -104,5 +106,6 @@
 #include "arm_compute/core/NEON/kernels/NEWeightsReshapeKernel.h"
 #include "arm_compute/core/NEON/kernels/arm32/NEGEMMAArch32Kernel.h"
 #include "arm_compute/core/NEON/kernels/arm64/NEGEMMAArch64Kernel.h"
+#include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h"
 
 #endif /* __ARM_COMPUTE_NEKERNELS_H__ */
diff --git a/arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h b/arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h
new file mode 100644
index 0000000..aa942c4
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMINTERLEAVEBLOCKEDKERNEL_H__
+#define __ARM_COMPUTE_NEGEMMINTERLEAVEBLOCKEDKERNEL_H__
+
+#include "arm_compute/core/NEON/INESimpleKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** NEON kernel to interleave the elements of a matrix
+ *
+ * Interleave_Blocked copies a block of values at a time instead of just one. Its main use is in GEMMLowp with the "dot product"
+ * instruction, where each operation consumes 4 values, so blocks of 4 values need to be copied.
+ *
+ */
+class NEGEMMInterleaveBlockedKernel : public INESimpleKernel
+{
+public:
+    /** Default constructor */
+    NEGEMMInterleaveBlockedKernel();
+    /** Initialise the kernel's input and output.
+     *
+     * @param[in]  input        Input tensor. Data types supported: U8
+     * @param[out] output       Output tensor which stores the interleaved matrix. Data type supported: same as @p input.
+     * @param[in]  block_height The height of the blocks to be interleaved.
+     * @param[in]  block_width  The width of the blocks to be interleaved.
+     * @param[in]  transpose    True if transpose operation must be performed, false otherwise.
+     */
+    void configure(const ITensor *input, ITensor *output, unsigned int block_height, unsigned int block_width, bool transpose);
+
+    // Inherited methods overridden:
+    void run(const Window &window, const ThreadInfo &info) override;
+
+private:
+    unsigned int _block_height;
+    unsigned int _block_width;
+    bool         _transpose;
+};
+
+} // namespace arm_compute
+#endif /*__ARM_COMPUTE_NEGEMMINTERLEAVEBLOCKEDKERNEL_H__*/
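For reference, here is a minimal scalar sketch of the blocked interleaving pattern this kernel describes: for each group of `block_height` rows, runs of `block_width` values from each row are written out back to back. The `interleave_blocked_ref` helper and its zero padding of partial blocks are illustrative assumptions, not the kernel's actual NEON implementation.

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Scalar reference of blocked interleaving (illustrative only): partial
// blocks at the right/bottom edges are assumed to be zero padded.
std::vector<uint8_t> interleave_blocked_ref(const uint8_t *in, size_t rows, size_t cols,
                                            size_t block_height, size_t block_width)
{
    const size_t padded_rows = (rows + block_height - 1) / block_height * block_height;
    const size_t padded_cols = (cols + block_width - 1) / block_width * block_width;
    std::vector<uint8_t> out;
    out.reserve(padded_rows * padded_cols);

    for(size_t r0 = 0; r0 < padded_rows; r0 += block_height) // one group of rows
    {
        for(size_t c0 = 0; c0 < padded_cols; c0 += block_width) // one column block
        {
            for(size_t r = r0; r < r0 + block_height; ++r)
            {
                for(size_t c = c0; c < c0 + block_width; ++c)
                {
                    // Copy block_width values at a time from each row in the group.
                    out.push_back((r < rows && c < cols) ? in[r * cols + c] : uint8_t{ 0 });
                }
            }
        }
    }
    return out;
}
```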
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h
new file mode 100644
index 0000000..32105ad
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMLOWPASSEMBLYBASE_H__
+#define __ARM_COMPUTE_NEGEMMLOWPASSEMBLYBASE_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Base class for GEMMLowp NEON assembly kernels that multiply two input matrices "A" and "B". */
+class NEGEMMLowpAssemblyBaseKernel : public INEKernel
+{
+public:
+    /** Constructor */
+    NEGEMMLowpAssemblyBaseKernel()
+        : _input0(nullptr), _input1(nullptr), _output(nullptr), _workspace(nullptr), _transform_0(true), _transform_1(true)
+    {
+    }
+
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEGEMMLowpAssemblyBaseKernel(const NEGEMMLowpAssemblyBaseKernel &) = delete;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEGEMMLowpAssemblyBaseKernel &operator=(const NEGEMMLowpAssemblyBaseKernel &) = delete;
+    /** Allow instances of this class to be moved */
+    NEGEMMLowpAssemblyBaseKernel(NEGEMMLowpAssemblyBaseKernel &&) = default;
+    /** Allow instances of this class to be moved */
+    NEGEMMLowpAssemblyBaseKernel &operator=(NEGEMMLowpAssemblyBaseKernel &&) = default;
+
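+    /** Default destructor */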
+    virtual ~NEGEMMLowpAssemblyBaseKernel() = default;
+
+    /** Initialise the kernel's input and output.
+     *
+     * The computed function is C = A * B.
+     *
+     * @param[in]  input0 Input tensor containing the Matrix A. Data types supported: U8
+     * @param[in]  input1 Input tensor containing the Matrix B. Data types supported: same as @p input0
+     * @param[out] output Output tensor to store the result of the matrix multiplication. Data types supported: U32
+     */
+    void configure(const ITensor *input0, const ITensor *input1, ITensor *output)
+    {
+        internal_configure(input0, input1, output);
+    }
+
+protected:
+    virtual void internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output) = 0;
+
+    const ITensor *_input0;
+    const ITensor *_input1;
+    ITensor       *_output;
+    ITensor       *_workspace;
+    bool           _transform_0;
+    bool           _transform_1;
+};
+} // namespace arm_compute
+#endif /*__ARM_COMPUTE_NEGEMMLOWPASSEMBLYBASE_H__*/
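The base class follows the non-virtual interface pattern: the public `configure()` forwards to the pure virtual `internal_configure()`, so each architecture-specific backend decides how to set up the tensors and the execution window. A hypothetical subclass sketch follows; the class name and window logic are illustrative assumptions, not the library's actual AArch64 kernel.

```cpp
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h"
#include "arm_compute/core/Window.h"

namespace arm_compute
{
// Hypothetical backend kernel, for illustration only.
class NEGEMMLowpExampleKernel : public NEGEMMLowpAssemblyBaseKernel
{
public:
    void run(const Window &window, const ThreadInfo &info) override
    {
        // ... issue the architecture-specific multiply over `window` ...
    }

protected:
    void internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output) override
    {
        // Cache the tensors in the protected members provided by the base class.
        _input0 = input0;
        _input1 = input1;
        _output = output;

        // Configure the execution window from the output shape, then register
        // it with the base kernel (IKernel::configure is the usual final step).
        Window win = calculate_max_window(*output->info());
        INEKernel::configure(win);
    }
};
} // namespace arm_compute
```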
diff --git a/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h b/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h
new file mode 100644
index 0000000..f218e1f
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMLOWPAARCH64V8P4KERNEL_H__
+#define __ARM_COMPUTE_NEGEMMLOWPAARCH64V8P4KERNEL_H__
+
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** AArch64 NEON kernel to multiply two input matrices "A" and "B". */
+class NEGEMMLowpAArch64V8P4Kernel : public NEGEMMLowpAssemblyBaseKernel
+{
+public:
+    // Inherited methods overridden:
+    void run(const Window &window, const ThreadInfo &info) override;
+    bool is_parallelisable() const override;
+
+protected:
+    void internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output) override;
+};
+} // namespace arm_compute
+#endif /*__ARM_COMPUTE_NEGEMMLOWPAARCH64V8P4KERNEL_H__*/
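For intuition, the "V8P4" in the name refers to the ARMv8 8-bit dot product extension: each dot product lane (e.g. UDOT) consumes 4 unsigned 8-bit values from each operand and accumulates into a 32-bit lane, which is why the blocked interleave above copies blocks of 4. A scalar sketch of that per-lane semantics only; the kernel itself is hand-written assembly:

```cpp
#include <cstdint>

// Scalar equivalent of one UDOT accumulator lane (semantics sketch only):
// four u8*u8 products are summed into a single 32-bit accumulator.
inline uint32_t dot_u8x4(uint32_t acc, const uint8_t a[4], const uint8_t b[4])
{
    for(int i = 0; i < 4; ++i)
    {
        acc += static_cast<uint32_t>(a[i]) * static_cast<uint32_t>(b[i]);
    }
    return acc;
}
```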
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowp.h b/arm_compute/runtime/NEON/functions/NEGEMMLowp.h
index 0b0a774..84850db 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowp.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowp.h
@@ -30,6 +30,8 @@
 
 #include "arm_compute/core/NEON/kernels/NEFillBorderKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
 #include "arm_compute/runtime/IMemoryManager.h"
@@ -75,16 +77,30 @@
     * @param[in]  shift           Number of bits to shift right the result.
     */
     void configure(const ITensor *a, const ITensor *b, ITensor *output, int32_t a_offset, int32_t b_offset, int32_t output_offset, int32_t output_mult_int, int32_t shift);
+    /** Initialise the kernel's inputs and output.
+     *
+     * @note GEMM_LOWP: low precision GEMM kernel
+     *  This kernel computes the matrix product C = A * B of two unsigned 8-bit matrices, accumulating the result into unsigned 32-bit values.
+     *
+     * @param[in]  a      First input tensor  (Matrix A). Data type supported: U8.
+     * @param[in]  b      Second input tensor (Matrix B). Data type supported: same as @p a
+     * @param[out] output Output tensor. Data type supported: U32.
+     */
+    void configure(const ITensor *a, const ITensor *b, ITensor *output);
+
     // Inherited methods overridden:
     void run() override;
 
 private:
-    MemoryGroup                    _memory_group;
-    NEGEMMInterleave4x4Kernel      _interleave_kernel;
-    NEGEMMTranspose1xWKernel       _transpose_kernel;
-    NEGEMMLowpMatrixMultiplyKernel _mm_kernel;
-    Tensor                         _tmp_a;
-    Tensor                         _tmp_b;
+    MemoryGroup                                   _memory_group;
+    NEGEMMInterleave4x4Kernel                     _interleave_kernel;
+    NEGEMMTranspose1xWKernel                      _transpose_kernel;
+    NEGEMMLowpMatrixMultiplyKernel                _mm_kernel;
+    std::unique_ptr<NEGEMMLowpAssemblyBaseKernel> _mm_optimised_kernel;
+    NEGEMMInterleaveBlockedKernel                 _interleave_blocked;
+    NEGEMMInterleaveBlockedKernel                 _interleave_blocked_transposed;
+    Tensor                                        _tmp_a;
+    Tensor                                        _tmp_b;
 };
 }
 #endif /*__ARM_COMPUTE_NEGEMMLOWP_H__ */
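Finally, a minimal usage sketch of the new offset-free `NEGEMMLowp::configure(a, b, output)` overload added above. The shapes are illustrative (A is MxK, B is KxN, output is MxN with M=16, K=32, N=8); note that `TensorShape` takes the width (number of columns) first.

```cpp
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowp.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    Tensor a, b, output;
    a.allocator()->init(TensorInfo(TensorShape(32U, 16U), 1, DataType::U8));      // A: 16x32
    b.allocator()->init(TensorInfo(TensorShape(8U, 32U), 1, DataType::U8));       // B: 32x8
    output.allocator()->init(TensorInfo(TensorShape(8U, 16U), 1, DataType::U32)); // C: 16x8

    NEGEMMLowp gemmlowp;
    gemmlowp.configure(&a, &b, &output); // the new offset-free overload

    a.allocator()->allocate();
    b.allocator()->allocate();
    output.allocator()->allocate();
    // ... fill a and b with uint8_t data ...

    gemmlowp.run();
    return 0;
}
```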