COMPMID-481: Add gemmlowp_aarch64_v8p4 kernel.

Change-Id: I15496b16ffd636f5bff76572e750df7e15c80830
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/90532
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h
index 5839d82..6d50ce7 100644
--- a/arm_compute/core/NEON/NEKernels.h
+++ b/arm_compute/core/NEON/NEKernels.h
@@ -59,6 +59,8 @@
 #include "arm_compute/core/NEON/kernels/NEFloorKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMAssemblyBaseKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMMatrixAccumulateBiasesKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
@@ -104,5 +106,6 @@
 #include "arm_compute/core/NEON/kernels/NEWeightsReshapeKernel.h"
 #include "arm_compute/core/NEON/kernels/arm32/NEGEMMAArch32Kernel.h"
 #include "arm_compute/core/NEON/kernels/arm64/NEGEMMAArch64Kernel.h"
+#include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h"
 
 #endif /* __ARM_COMPUTE_NEKERNELS_H__ */
diff --git a/arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h b/arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h
new file mode 100644
index 0000000..aa942c4
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMINTERLEAVEBLOCKEDKERNEL_H__
+#define __ARM_COMPUTE_NEGEMMINTERLEAVEBLOCKEDKERNEL_H__
+
+#include "arm_compute/core/NEON/INESimpleKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** NEON kernel to interleave the elements of a matrix
+ *
+ * Interleave_Blocked copies a block of values at a time instead of just one. Its main use is gemmlowp with the "dot product"
+ * instruction, where each operation consumes 4 values, so blocks of 4 values need to be copied.
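+ * For example, with a block height and width of 4, each 4x4 tile of the input is copied row-by-row into
+ * 16 consecutive output bytes; positions that fall outside the input are padded with zeros.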
+ *
+ */
+class NEGEMMInterleaveBlockedKernel : public INESimpleKernel
+{
+public:
+    /** Constructor */
+    NEGEMMInterleaveBlockedKernel();
+    /** Initialise the kernel's input and output.
+     *
+     * @param[in]  input        Input tensor. Data types supported: U8
+     * @param[out] output       Output tensor which stores the interleaved matrix. Data type supported: same as @p input.
+     * @param[in]  block_height The height of the blocks to be interleaved.
+     * @param[in]  block_width  The width of the blocks to be interleaved.
+     * @param[in]  transpose    True if transpose operation must be performed, false otherwise.
+     */
+    void configure(const ITensor *input, ITensor *output, unsigned int block_height, unsigned int block_width, bool transpose);
+
+    // Inherited methods overridden:
+    void run(const Window &window, const ThreadInfo &info) override;
+
+private:
+    unsigned int _block_height;
+    unsigned int _block_width;
+    bool         _transpose;
+};
+
+} // namespace arm_compute
+#endif /*__ARM_COMPUTE_NEGEMMINTERLEAVEBLOCKEDKERNEL_H__*/
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h
new file mode 100644
index 0000000..32105ad
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMLOWPASSEMBLYBASE_H__
+#define __ARM_COMPUTE_NEGEMMLOWPASSEMBLYBASE_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Base class for GEMMLowp NEON assembly kernels that multiply two input matrices "A" and "B". */
+class NEGEMMLowpAssemblyBaseKernel : public INEKernel
+{
+public:
+    /** Constructor */
+    NEGEMMLowpAssemblyBaseKernel()
+        : _input0(nullptr), _input1(nullptr), _output(nullptr), _workspace(nullptr), _transform_0(true), _transform_1(true)
+    {
+    }
+
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEGEMMLowpAssemblyBaseKernel(const NEGEMMLowpAssemblyBaseKernel &) = delete;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEGEMMLowpAssemblyBaseKernel &operator=(const NEGEMMLowpAssemblyBaseKernel &) = delete;
+    /** Allow instances of this class to be moved */
+    NEGEMMLowpAssemblyBaseKernel(NEGEMMLowpAssemblyBaseKernel &&) = default;
+    /** Allow instances of this class to be moved */
+    NEGEMMLowpAssemblyBaseKernel &operator=(NEGEMMLowpAssemblyBaseKernel &&) = default;
+
+    virtual ~NEGEMMLowpAssemblyBaseKernel() = default;
+
+    /** Initialise the kernel's input and output.
+     *
+     * The computed function is C = A * B.
+     *
+     * @param[in]     input0 Input tensor containing the Matrix A. Data types supported: U8
+     * @param[in]     input1 Input tensor containing the Matrix B. Data types supported: same as @p input0
+     * @param[in,out] output Output tensor to store the result of matrix multiplication. Data types supported: U32
+     */
+    void configure(const ITensor *input0, const ITensor *input1, ITensor *output)
+    {
+        internal_configure(input0, input1, output);
+    }
+
+protected:
+    virtual void internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output) = 0;
+
+    const ITensor *_input0;
+    const ITensor *_input1;
+    ITensor       *_output;
+    ITensor       *_workspace;
+    bool           _transform_0;
+    bool           _transform_1;
+};
+} // namespace arm_compute
+#endif /*__ARM_COMPUTE_NEGEMMLOWPASSEMBLYBASE_H__*/
diff --git a/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h b/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h
new file mode 100644
index 0000000..f218e1f
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMLOWPAARCH64V8P4KERNEL_H__
+#define __ARM_COMPUTE_NEGEMMLOWPAARCH64V8P4KERNEL_H__
+
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** AArch64 NEON kernel to multiply two low precision (8-bit) input matrices "A" and "B" using the dot product instruction. */
+class NEGEMMLowpAArch64V8P4Kernel : public NEGEMMLowpAssemblyBaseKernel
+{
+public:
+    // Inherited methods overridden:
+    void run(const Window &window, const ThreadInfo &info) override;
+    bool is_parallelisable() const override;
+
+protected:
+    void internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output) override;
+};
+} // namespace arm_compute
+#endif /*__ARM_COMPUTE_NEGEMMLOWPAARCH64V8P4KERNEL_H__*/
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowp.h b/arm_compute/runtime/NEON/functions/NEGEMMLowp.h
index 0b0a774..84850db 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowp.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowp.h
@@ -30,6 +30,8 @@
 
 #include "arm_compute/core/NEON/kernels/NEFillBorderKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
 #include "arm_compute/runtime/IMemoryManager.h"
@@ -75,16 +77,30 @@
     * @param[in]  shift           Number of bits to shift right the result.
     */
     void configure(const ITensor *a, const ITensor *b, ITensor *output, int32_t a_offset, int32_t b_offset, int32_t output_offset, int32_t output_mult_int, int32_t shift);
+    /** Initialise the function's inputs and output
+     *
+     * @note GEMM_LOWP: low precision GEMM kernel
+     *  This function computes output = A * B, with unsigned 8-bit inputs accumulated into unsigned 32-bit results.
+     *
+     * @param[in]  a      First input tensor  (Matrix A). Data type supported: U8.
+     * @param[in]  b      Second input tensor (Matrix B). Data type supported: same as @p a
+     * @param[out] output Output tensor. Data type supported: U32.
+     */
+    void configure(const ITensor *a, const ITensor *b, ITensor *output);
+
     // Inherited methods overridden:
     void run() override;
 
 private:
-    MemoryGroup                    _memory_group;
-    NEGEMMInterleave4x4Kernel      _interleave_kernel;
-    NEGEMMTranspose1xWKernel       _transpose_kernel;
-    NEGEMMLowpMatrixMultiplyKernel _mm_kernel;
-    Tensor                         _tmp_a;
-    Tensor                         _tmp_b;
+    MemoryGroup                                   _memory_group;
+    NEGEMMInterleave4x4Kernel                     _interleave_kernel;
+    NEGEMMTranspose1xWKernel                      _transpose_kernel;
+    NEGEMMLowpMatrixMultiplyKernel                _mm_kernel;
+    std::unique_ptr<NEGEMMLowpAssemblyBaseKernel> _mm_optimised_kernel;
+    NEGEMMInterleaveBlockedKernel                 _interleave_blocked;
+    NEGEMMInterleaveBlockedKernel                 _interleave_blocked_transposed;
+    Tensor                                        _tmp_a;
+    Tensor                                        _tmp_b;
 };
 }
 #endif /*__ARM_COMPUTE_NEGEMMLOWP_H__ */
diff --git a/src/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.cpp b/src/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.cpp
new file mode 100644
index 0000000..a9c624a
--- /dev/null
+++ b/src/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.cpp
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/NEON/INEKernel.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+
+#include <arm_neon.h>
+#include <cstddef>
+#include <cstdint>
+#include <tuple>
+
+using namespace arm_compute;
+
+namespace
+{
+inline void gemm_interleave_8bit_elements(const ITensor *input, ITensor *output, const Window &window, unsigned int block_width, unsigned int block_height, bool transpose)
+{
+    const size_t in_stride      = input->info()->strides_in_bytes()[1];
+    const float  scale_y_factor = 1.f / float(block_height);
+
+    // Set window for output tensor
+    Window win_out(window);
+    win_out.scale(Window::DimY, scale_y_factor);
+    Iterator in(input, window);
+
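+    // Each block_height x block_width block of the input is written as block_width * block_height consecutive
+    // output elements, so the output window collapses block_height input rows into one and steps
+    // block_width * block_height elements along X.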
+    win_out.set_dimension_step(Window::DimX, block_width * block_height);
+    Iterator out(output, win_out);
+    execute_window_loop(window, [&](const Coordinates & id)
+    {
+        int j = 0;
+        for(unsigned int z = 0; z < block_height; ++z)
+        {
+            for(unsigned int b = 0; b < block_width; ++b)
+            {
+                if(!transpose)
+                {
+                    const bool inbounds = (id.x() + b) < input->info()->dimension(0) && (id.y() + z) < input->info()->dimension(1);
+                    *(out.ptr() + j++)  = (inbounds) ? *(in.ptr() + z * in_stride + b) : 0;
+                }
+                else
+                {
+                    const bool    inbounds = (id.x() + b) < input->info()->dimension(1) && (id.y() + z) < input->info()->dimension(0);
+                    const uint8_t value    = (inbounds) ? *(input->buffer() + (id.x() + b) * in_stride + (id.y() + z)) : 0;
+                    *(out.ptr() + j++)     = value;
+                }
+            }
+        }
+    },
+    in, out);
+}
+
+} // namespace
+
+NEGEMMInterleaveBlockedKernel::NEGEMMInterleaveBlockedKernel()
+    : _block_height(0), _block_width(0), _transpose(false)
+{
+}
+
+void NEGEMMInterleaveBlockedKernel::configure(const ITensor *input, ITensor *output, unsigned int block_height, unsigned int block_width, bool transpose)
+{
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
+    ARM_COMPUTE_ERROR_ON_NULLPTR(output);
+    ARM_COMPUTE_ERROR_ON_MSG(block_height < 1, "Block height must be greater than 0");
+    ARM_COMPUTE_ERROR_ON_MSG(block_width < 1, "Block width must be greater than 0");
+
+    TensorShape output_shape      = input->info()->tensor_shape();
+    const float interleave_by_f32 = block_height;
+    output_shape.set(0, input->info()->dimension(0) * interleave_by_f32);
+    output_shape.set(1, std::ceil(static_cast<float>(input->info()->dimension(1)) / interleave_by_f32));
+    // Output auto initialization if not yet initialized
+    auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type(), input->info()->fixed_point_position());
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(output->info()->tensor_shape(), output_shape);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
+
+    _input        = input;
+    _output       = output;
+    _block_height = block_height;
+    _block_width  = block_width;
+    _transpose    = transpose;
+
+    const unsigned int num_elems_processed_per_iteration_x = block_width;
+    const unsigned int num_elems_processed_per_iteration_y = block_height;
+
+    // Configure kernel window
+    Window      win           = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
+    const float scaley_factor = 1.f / interleave_by_f32;
+
+    AccessWindowRectangle output_access(output->info(), 0, 0, num_elems_processed_per_iteration_x * num_elems_processed_per_iteration_y, 1, num_elems_processed_per_iteration_y, scaley_factor);
+    AccessWindowRectangle input_access(input->info(), 0, 0, num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y);
+    update_window_and_padding(win, output_access, input_access);
+
+    output_access.set_valid_region(win, input->info()->valid_region());
+
+    INEKernel::configure(win);
+}
+
+void NEGEMMInterleaveBlockedKernel::run(const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_UNUSED(info);
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+    gemm_interleave_8bit_elements(_input, _output, window, _block_width, _block_height, _transpose);
+}
diff --git a/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.cpp b/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.cpp
new file mode 100644
index 0000000..939f1b7
--- /dev/null
+++ b/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.cpp
@@ -0,0 +1,519 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h"
+
+#include "arm_compute/core/AccessWindowStatic.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/IAccessWindow.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+#include "support/ToolchainSupport.h"
+
+#include <arm_neon.h>
+#include <cstddef>
+#include <cstdint>
+
+#define ASM_PREFETCH(address) "PRFM PLDL1KEEP, " address "\n"
+#define ASM_PREFETCHL2(address) "PRFM PLDL2KEEP, " address "\n"
+#define ASM_PREFETCHW(address) "PRFM PSTL1KEEP, " address "\n"
+#define ASM_PREFETCHWL2(address) "PRFM PSTL2KEEP, " address "\n"
+
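+// Adds the eight 4-lane accumulator vectors (v0..v7) to the 32-bit values already stored at the eight
+// output row pointers, writes the sums back and post-increments each pointer by 16 bytes.
+// stincpld additionally prefetches further along each output row.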
+static inline void stincpld(uint32x4_t v0, uint32x4_t v1, uint32x4_t v2, uint32x4_t v3,
+                            uint32x4_t v4, uint32x4_t v5, uint32x4_t v6, uint32x4_t v7,
+                            uint32_t *&ptr0, uint32_t *&ptr1, uint32_t *&ptr2, uint32_t *&ptr3,
+                            uint32_t *&ptr4, uint32_t *&ptr5, uint32_t *&ptr6, uint32_t *&ptr7)
+{
+    __asm __volatile(
+        "LDR    q0, [%[ptr0]]\n"
+        "LDR    q1, [%[ptr1]]\n"
+        "LDR    q2, [%[ptr2]]\n"
+        "LDR    q3, [%[ptr3]]\n"
+        "LDR    q4, [%[ptr4]]\n"
+        "LDR    q5, [%[ptr5]]\n"
+        "LDR    q6, [%[ptr6]]\n"
+        "LDR    q7, [%[ptr7]]\n"
+        "ADD    v0.4s, v0.4s, %[v0].4s\n" ASM_PREFETCH("[%[ptr0], #80]") "ADD    v1.4s, v1.4s, %[v1].4s\n" ASM_PREFETCH("[%[ptr1], #80]") "ADD    v2.4s, v2.4s, %[v2].4s\n" ASM_PREFETCH("[%[ptr2], #80]")
+        "ADD    v3.4s, v3.4s, %[v3].4s\n" ASM_PREFETCH("[%[ptr3], #80]") "ADD    v4.4s, v4.4s, %[v4].4s\n" ASM_PREFETCH("[%[ptr4], #80]") "ADD    v5.4s, v5.4s, %[v5].4s\n" ASM_PREFETCH("[%[ptr5], #80]")
+        "ADD    v6.4s, v6.4s, %[v6].4s\n" ASM_PREFETCH("[%[ptr6], #80]") "ADD    v7.4s, v7.4s, %[v7].4s\n" ASM_PREFETCH("[%[ptr7], #80]")
+        "STR    q0, [%[ptr0]], #16\n"
+        "STR    q1, [%[ptr1]], #16\n"
+        "STR    q2, [%[ptr2]], #16\n"
+        "STR    q3, [%[ptr3]], #16\n"
+        "STR    q4, [%[ptr4]], #16\n"
+        "STR    q5, [%[ptr5]], #16\n"
+        "STR    q6, [%[ptr6]], #16\n"
+        "STR    q7, [%[ptr7]], #16\n"
+        : [ptr0] "+r"(ptr0), [ptr1] "+r"(ptr1), [ptr2] "+r"(ptr2), [ptr3] "+r"(ptr3),
+        [ptr4] "+r"(ptr4), [ptr5] "+r"(ptr5), [ptr6] "+r"(ptr6), [ptr7] "+r"(ptr7)
+        : [v0] "w"(v0), [v1] "w"(v1), [v2] "w"(v2), [v3] "w"(v3),
+        [v4] "w"(v4), [v5] "w"(v5), [v6] "w"(v6), [v7] "w"(v7)
+        : "x20", "x21", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "memory");
+}
+
+static inline void stinc(uint32x4_t v0, uint32x4_t v1, uint32x4_t v2, uint32x4_t v3,
+                         uint32x4_t v4, uint32x4_t v5, uint32x4_t v6, uint32x4_t v7,
+                         uint32_t *&ptr0, uint32_t *&ptr1, uint32_t *&ptr2, uint32_t *&ptr3,
+                         uint32_t *&ptr4, uint32_t *&ptr5, uint32_t *&ptr6, uint32_t *&ptr7)
+{
+    __asm __volatile(
+        "LDR    q0, [%[ptr0]]\n"
+        "LDR    q1, [%[ptr1]]\n"
+        "LDR    q2, [%[ptr2]]\n"
+        "LDR    q3, [%[ptr3]]\n"
+        "LDR    q4, [%[ptr4]]\n"
+        "LDR    q5, [%[ptr5]]\n"
+        "LDR    q6, [%[ptr6]]\n"
+        "LDR    q7, [%[ptr7]]\n"
+        "ADD    v0.4s, v0.4s, %[v0].4s\n"
+        "ADD    v1.4s, v1.4s, %[v1].4s\n"
+        "ADD    v2.4s, v2.4s, %[v2].4s\n"
+        "ADD    v3.4s, v3.4s, %[v3].4s\n"
+        "ADD    v4.4s, v4.4s, %[v4].4s\n"
+        "ADD    v5.4s, v5.4s, %[v5].4s\n"
+        "ADD    v6.4s, v6.4s, %[v6].4s\n"
+        "ADD    v7.4s, v7.4s, %[v7].4s\n"
+        "STR    q0, [%[ptr0]], #16\n"
+        "STR    q1, [%[ptr1]], #16\n"
+        "STR    q2, [%[ptr2]], #16\n"
+        "STR    q3, [%[ptr3]], #16\n"
+        "STR    q4, [%[ptr4]], #16\n"
+        "STR    q5, [%[ptr5]], #16\n"
+        "STR    q6, [%[ptr6]], #16\n"
+        "STR    q7, [%[ptr7]], #16\n"
+        : [ptr0] "+r"(ptr0), [ptr1] "+r"(ptr1), [ptr2] "+r"(ptr2), [ptr3] "+r"(ptr3),
+        [ptr4] "+r"(ptr4), [ptr5] "+r"(ptr5), [ptr6] "+r"(ptr6), [ptr7] "+r"(ptr7)
+        : [v0] "w"(v0), [v1] "w"(v1), [v2] "w"(v2), [v3] "w"(v3),
+        [v4] "w"(v4), [v5] "w"(v5), [v6] "w"(v6), [v7] "w"(v7)
+        : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "memory");
+}
+
+namespace arm_compute
+{
+void NEGEMMLowpAArch64V8P4Kernel::internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output)
+{
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::U8);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
+
+    _input0 = input0;
+    _input1 = input1;
+    _output = output;
+
+    // Configure kernel window
+    Window win = calculate_max_window(*output->info());
+
+    AccessWindowRectangle output_access(output->info(), 0, 0, 12, 8);
+
+    const int input0_access_end = ceil_to_multiple(input0->info()->tensor_shape().x(), 8);
+    const int input1_access_end = ceil_to_multiple(input1->info()->tensor_shape().x(), 12);
+
+    update_window_and_padding(win,
+                              AccessWindowStatic(input0->info(), 0, 0, input0_access_end, input0->info()->tensor_shape().y()),
+                              AccessWindowStatic(input1->info(), 0, 0, input1_access_end, input1->info()->tensor_shape().y()),
+                              output_access);
+
+    INEKernel::configure(win);
+}
+
+bool NEGEMMLowpAArch64V8P4Kernel::is_parallelisable() const
+{
+    return false;
+}
+
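+// Assembler macro emitting the raw encoding of the "udot" dot product instruction: the register numbers
+// are parsed out of the textual operands and packed into the 32-bit opcode (base 0x6f80e000), presumably
+// so that the kernel still assembles with toolchains that do not accept the udot mnemonic.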
+#define _UDOT_MACRO                                                                                    \
+    ".altmacro\n"                                                                                      \
+    ".macro udot opd:req, opn:req, opm:req\n"                                                          \
+    "local vd, vn, vm, h, l\n"                                                                         \
+    ".irp reg,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31\n" \
+    ".ifeqs \"\\opd\",\"v\\reg\\.4s\"\n"                                                               \
+    ".set vd,\\reg\n"                                                                                  \
+    ".endif\n"                                                                                         \
+    ".ifeqs \"\\opn\",\"v\\reg\\.16b\"\n"                                                              \
+    ".set vn,\\reg\n"                                                                                  \
+    ".endif\n"                                                                                         \
+    ".irp idx,0,1,2,3\n"                                                                               \
+    ".ifeqs \"\\opm\",\"v\\reg\\.4b[\\idx\\]\"\n"                                                      \
+    ".set vm,\\reg\n"                                                                                  \
+    ".set h,\\idx / 2\n"                                                                               \
+    ".set l,\\idx %% 2\n"                                                                              \
+    ".endif\n"                                                                                         \
+    ".endr\n"                                                                                          \
+    ".endr\n"                                                                                          \
+    ".ifndef vd\n"                                                                                     \
+    ".error \"Bad operand \\opd\"\n"                                                                   \
+    ".exitm\n"                                                                                         \
+    ".endif\n"                                                                                         \
+    ".ifndef vn\n"                                                                                     \
+    ".error \"Bad operand \\opn\"\n"                                                                   \
+    ".exitm\n"                                                                                         \
+    ".endif\n"                                                                                         \
+    ".ifndef vm\n"                                                                                     \
+    ".error \"Bad operand \\opm\"\n"                                                                   \
+    ".exitm\n"                                                                                         \
+    ".endif\n"                                                                                         \
+    ".ifndef h\n"                                                                                      \
+    ".error \"Bad operand \\opm\"\n"                                                                   \
+    ".exitm\n"                                                                                         \
+    ".endif\n"                                                                                         \
+    ".ifndef l\n"                                                                                      \
+    ".error \"Bad operand \\opm\"\n"                                                                   \
+    ".exitm\n"                                                                                         \
+    ".endif\n"                                                                                         \
+    ".int    0x6f80e000 | vd | (vn << 5) | (vm << 16) | (l << 21) | (h << 11)\n"                       \
+    ".endm\n"
+
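+// Prefetches the upcoming cache lines of the A and B panels. The empty asm statements only list the
+// accumulator and operand variables as operands; this appears intended to keep them live in the
+// registers they are bound to in run() below.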
+#define _PREFETCH_                                     \
+    __asm __volatile(                                  \
+                                                       "" ASM_PREFETCH("[%[a_ptr], #64]")             \
+                                                       ASM_PREFETCH("[%[a_ptr], #128]")           \
+                                                       ASM_PREFETCH("[%[a_ptr], #192]")       \
+                                                       :                                              \
+                                                       :                                              \
+                                                       [a_ptr] "r"(a_ptr), [b_ptr] "r"(b_ptr)         \
+                                                       : "x20", "x21", "memory");                     \
+    __asm __volatile(                                  \
+                                                       "" ASM_PREFETCH("[%[b_ptr]]")                  \
+                                                       ASM_PREFETCH("[%[b_ptr], #64]")            \
+                                                       ASM_PREFETCH("[%[b_ptr], #128]")       \
+                                                       ASM_PREFETCH("[%[b_ptr], #192]")   \
+                                                       :                                              \
+                                                       :                                              \
+                                                       [b_ptr] "r"(b_ptr)                             \
+                                                       : "x20", "x21");                               \
+    __asm __volatile(                                  \
+                                                       ""                                             \
+                                                       : [r00] "+w"(r00), [r01] "+w"(r01),            \
+                                                       [r10] "+w"(r10), [r11] "+w"(r11),            \
+                                                       [r20] "+w"(r20), [r21] "+w"(r21),            \
+                                                       [r30] "+w"(r30), [r31] "+w"(r31),            \
+                                                       [a0] "+w"(a0), [a1] "+w"(a1),                \
+                                                       [b0] "+w"(b0), [b1] "+w"(b1), [b2] "=w"(b2), \
+                                                       [a_ptr] "+r"(a_ptr), [b_ptr] "+r"(b_ptr)     \
+                                                       :                                              \
+                                                       :);                                            \
+    __asm __volatile(                                  \
+                                                       ""                                             \
+                                                       : [r02] "+w"(r02),                             \
+                                                       [r12] "+w"(r12),                             \
+                                                       [r22] "+w"(r22),                             \
+                                                       [r32] "+w"(r32),                             \
+                                                       [r40] "+w"(r40),                             \
+                                                       [r50] "+w"(r50),                             \
+                                                       [r60] "+w"(r60),                             \
+                                                       [r70] "+w"(r70),                             \
+                                                       [a0a] "=w"(a0a), [a1a] "=w"(a1a),            \
+                                                       [b0] "+w"(b0), [b2] "+w"(b2), [b5] "=&w"(b5) \
+                                                       :                                              \
+                                                       :);                                            \
+    __asm __volatile(                                  \
+                                                       ""                                             \
+                                                       :                                              \
+                                                       [r41] "+w"(r41), [r42] "+w"(r42),              \
+                                                       [r51] "+w"(r51), [r52] "+w"(r52),              \
+                                                       [r61] "+w"(r61), [r62] "+w"(r62),              \
+                                                       [r71] "+w"(r71), [r72] "+w"(r72),              \
+                                                       [a1] "+w"(a1),                                 \
+                                                       [b0] "+w"(b0), [b1] "+w"(b1), [b2] "+w"(b2),   \
+                                                       [b_ptr] "+r"(b_ptr), [k] "+r"(k)               \
+                                                       :                                              \
+                                                       :);
+
+void NEGEMMLowpAArch64V8P4Kernel::run(const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_UNUSED(info);
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+
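+    // Cache blocking parameters: the output is processed in 8-row x 12-column tiles, with N blocked by
+    // x_block and the reduction dimension K blocked by k_block; the kernel runs single-threaded
+    // (see is_parallelisable()), so nthreads is fixed to 1.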
+    const int x_block  = 348;
+    const int k_block  = 1664;
+    const int nthreads = 1;
+    const int M        = _output->info()->tensor_shape().y();
+    const int N        = _output->info()->tensor_shape().x();
+    const int K        = _input0->info()->tensor_shape().x() >> 3;
+
+    int yblocksperthread = ((M / nthreads) + 7) / 8;
+
+    if(yblocksperthread < 1)
+    {
+        yblocksperthread = 1;
+    }
+
+    const int lda  = _input0->info()->strides_in_bytes().y();
+    const int ldb  = _input1->info()->strides_in_bytes().y();
+    const int ldc  = _output->info()->strides_in_bytes().y();
+    const int ldc2 = _output->info()->strides_in_bytes().x();
+    const int ldc3 = ldc / sizeof(uint32_t);
+
+    const int threadid = 0;
+    int       y0       = threadid * yblocksperthread * 8;
+    int       ymax     = (threadid + 1) * yblocksperthread * 8;
+    if(y0 >= M)
+    {
+        return;
+    }
+    if(ymax > M)
+    {
+        ymax = M;
+    }
+    for(int k0 = 0; k0 < K; k0 += k_block)
+    {
+        int kmax = k0 + k_block;
+        if(kmax > K)
+        {
+            kmax = K;
+        }
+
+        for(int x0 = 0; x0 < N; x0 += x_block)
+        {
+            int xmax = x0 + x_block;
+            if(xmax > N)
+            {
+                xmax = N;
+            }
+
+            for(int y = y0; y < ymax; y += 8)
+            {
+                auto      c_ptr0 = reinterpret_cast<uint32_t *>(_output->buffer() + (y * ldc) + x0 * ldc2);
+                uint32_t *c_ptr1 = c_ptr0 + ldc3;
+                uint32_t *c_ptr2 = c_ptr1 + ldc3;
+                uint32_t *c_ptr3 = c_ptr2 + ldc3;
+                uint32_t *c_ptr4 = c_ptr3 + ldc3;
+                uint32_t *c_ptr5 = c_ptr4 + ldc3;
+                uint32_t *c_ptr6 = c_ptr5 + ldc3;
+                uint32_t *c_ptr7 = c_ptr6 + ldc3;
+
+                __asm __volatile(
+                    "" ASM_PREFETCH("[%[c_ptr0]]")
+                    ASM_PREFETCH("[%[c_ptr1]]")
+                    ASM_PREFETCH("[%[c_ptr2]]")
+                    ASM_PREFETCH("[%[c_ptr3]]")
+                    ASM_PREFETCH("[%[c_ptr4]]")
+                    ASM_PREFETCH("[%[c_ptr5]]")
+                    ASM_PREFETCH("[%[c_ptr6]]")
+                    ASM_PREFETCH("[%[c_ptr7]]")
+                    :
+                    : [c_ptr0] "r"(c_ptr0), [c_ptr1] "r"(c_ptr1), [c_ptr2] "r"(c_ptr2), [c_ptr3] "r"(c_ptr3),
+                    [c_ptr4] "r"(c_ptr4), [c_ptr5] "r"(c_ptr5), [c_ptr6] "r"(c_ptr6), [c_ptr7] "r"(c_ptr7)
+                    : "x20", "x21");
+
+                for(int x = x0; x < xmax; x += 12)
+                {
+                    register uint32x4_t r00 asm("v8");
+                    register uint32x4_t r10 asm("v9");
+                    register uint32x4_t r20 asm("v10");
+                    register uint32x4_t r30 asm("v11");
+                    register uint32x4_t r40 asm("v12");
+                    register uint32x4_t r50 asm("v13");
+                    register uint32x4_t r60 asm("v14");
+                    register uint32x4_t r70 asm("v15");
+                    register uint32x4_t r01 asm("v16");
+                    register uint32x4_t r11 asm("v17");
+                    register uint32x4_t r21 asm("v18");
+                    register uint32x4_t r31 asm("v19");
+                    register uint32x4_t r41 asm("v20");
+                    register uint32x4_t r51 asm("v21");
+                    register uint32x4_t r61 asm("v22");
+                    register uint32x4_t r71 asm("v23");
+                    register uint32x4_t r02 asm("v24");
+                    register uint32x4_t r12 asm("v25");
+                    register uint32x4_t r22 asm("v26");
+                    register uint32x4_t r32 asm("v27");
+                    register uint32x4_t r42 asm("v28");
+                    register uint32x4_t r52 asm("v29");
+                    register uint32x4_t r62 asm("v30");
+                    register uint32x4_t r72 asm("v31");
+
+                    register uint8x16_t a0 asm("v0");
+                    register uint8x16_t a1 asm("v1");
+                    register uint8x16_t b0 asm("v2");
+                    register uint8x16_t b1 asm("v3");
+                    register uint8x16_t b2 asm("v4");
+                    register uint8x16_t a0a asm("v5");
+                    register uint8x16_t a1a asm("v6");
+                    register uint8x16_t b5 asm("v7");
+                    const uint8_t      *a_ptr = _input0->buffer() + ((y / 8) * lda) + (k0 * 8);
+                    const uint8_t      *b_ptr = _input1->buffer() + ((x / 12) * ldb) + (k0 * 12);
+
+                    r00 = r01 = r02 = r10 = r11 = r12 = r20 = r21 = r22 = r30 = r31 = r32 = vdupq_n_u32(0);
+                    r40 = r41 = r42 = r50 = r51 = r52 = r60 = r61 = r62 = r70 = r71 = r72 = vdupq_n_u32(0);
+
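+                    // Each pass of the main asm loop consumes 8 values along K (64 bytes of the A panel,
+                    // 96 bytes of the B panel); the final pass is peeled off after the "bne 1b", hence the -1.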
+                    int k = ((kmax - k0) / 8) - 1;
+
+                    a0 = vld1q_u8(a_ptr);
+                    b0 = vld1q_u8(b_ptr);
+                    a1 = vld1q_u8(a_ptr + 16);
+                    b1 = vld1q_u8(b_ptr + 16);
+
+                    _PREFETCH_
+
+                    __asm __volatile(
+                        _UDOT_MACRO
+                        "1:\n"
+                        "udot    v8.4s , %[b0].16b, %[a0].4b[0]\n"
+                        "udot    v9.4s , %[b0].16b, %[a0].4b[1]\n"
+                        "ldr    %q[b2], [%[b_ptr], #32]\n"
+                        "udot    v10.4s, %[b0].16b, %[a0].4b[2]\n"
+                        "udot    v11.4s, %[b0].16b, %[a0].4b[3]\n"
+                        "ldr    %q[a0a], [%[a_ptr], #32]\n"
+                        "udot    v12.4s, %[b0].16b, %[a1].4b[0]\n"
+                        "udot    v13.4s, %[b0].16b, %[a1].4b[1]\n"
+                        "ldr    %q[a1a], [%[a_ptr], #48]\n"
+                        "udot    v14.4s, %[b0].16b, %[a1].4b[2]\n"
+                        "udot    v15.4s, %[b0].16b, %[a1].4b[3]\n"
+                        "ldr    %q[b0], [%[b_ptr], #48]\n"
+
+                        "udot    v16.4s, %[b1].16b, %[a0].4b[0]\n"
+                        "udot    v17.4s, %[b1].16b, %[a0].4b[1]\n" ASM_PREFETCH("[%[a_ptr], #256]")
+                        "udot    v18.4s, %[b1].16b, %[a0].4b[2]\n"
+                        "udot    v19.4s, %[b1].16b, %[a0].4b[3]\n"
+                        "udot    v20.4s, %[b1].16b, %[a1].4b[0]\n"
+                        "udot    v21.4s, %[b1].16b, %[a1].4b[1]\n"
+                        "udot    v22.4s, %[b1].16b, %[a1].4b[2]\n"
+                        "udot    v23.4s, %[b1].16b, %[a1].4b[3]\n"
+                        "ldr    %q[b1], [%[b_ptr], #64]\n"
+
+                        "udot    v24.4s, %[b2].16b, %[a0].4b[0]\n"
+                        "udot    v25.4s, %[b2].16b, %[a0].4b[1]\n" ASM_PREFETCH("[%[b_ptr], #256]")
+                        "udot    v26.4s, %[b2].16b, %[a0].4b[2]\n"
+                        "udot    v27.4s, %[b2].16b, %[a0].4b[3]\n"
+                        "udot    v28.4s, %[b2].16b, %[a1].4b[0]\n"
+                        "udot    v29.4s, %[b2].16b, %[a1].4b[1]\n"
+                        "udot    v30.4s, %[b2].16b, %[a1].4b[2]\n"
+                        "udot    v31.4s, %[b2].16b, %[a1].4b[3]\n"
+                        "ldr    %q[b2], [%[b_ptr], #80]\n"
+
+                        "udot    v8.4s , %[b0].16b, %[a0a].4b[0]\n"
+                        "udot    v9.4s , %[b0].16b, %[a0a].4b[1]\n"
+                        "ldr    %q[a0], [%[a_ptr], #64]\n"
+                        "udot    v10.4s, %[b0].16b, %[a0a].4b[2]\n"
+                        "udot    v11.4s, %[b0].16b, %[a0a].4b[3]\n"
+                        "udot    v12.4s, %[b0].16b, %[a1a].4b[0]\n"
+                        "ldr    %q[a1], [%[a_ptr], #80]\n"
+                        "udot    v13.4s, %[b0].16b, %[a1a].4b[1]\n"
+                        "udot    v14.4s, %[b0].16b, %[a1a].4b[2]\n"
+                        "udot    v15.4s, %[b0].16b, %[a1a].4b[3]\n"
+                        "ldr    %q[b0], [%[b_ptr], #96]\n"
+
+                        "udot    v16.4s, %[b1].16b, %[a0a].4b[0]\n"
+                        "udot    v17.4s, %[b1].16b, %[a0a].4b[1]\n" ASM_PREFETCH("[%[b_ptr], #320]")
+                        "udot    v18.4s, %[b1].16b, %[a0a].4b[2]\n"
+                        "udot    v19.4s, %[b1].16b, %[a0a].4b[3]\n"
+                        "udot    v20.4s, %[b1].16b, %[a1a].4b[0]\n"
+                        "udot    v21.4s, %[b1].16b, %[a1a].4b[1]\n"
+                        "udot    v22.4s, %[b1].16b, %[a1a].4b[2]\n"
+                        "udot    v23.4s, %[b1].16b, %[a1a].4b[3]\n"
+                        "ldr    %q[b1], [%[b_ptr], #112]\n"
+
+                        "udot    v24.4s, %[b2].16b, %[a0a].4b[0]\n"
+                        "udot    v25.4s, %[b2].16b, %[a0a].4b[1]\n"
+                        "add    %[a_ptr], %[a_ptr], #64\n"
+                        "udot    v26.4s, %[b2].16b, %[a0a].4b[2]\n"
+                        "udot    v27.4s, %[b2].16b, %[a0a].4b[3]\n"
+                        "add    %[b_ptr], %[b_ptr], #96\n"
+                        "udot    v28.4s, %[b2].16b, %[a1a].4b[0]\n"
+                        "udot    v29.4s, %[b2].16b, %[a1a].4b[1]\n"
+                        "subs    %w[k], %w[k], #1\n"
+                        "udot    v30.4s, %[b2].16b, %[a1a].4b[2]\n"
+                        "udot    v31.4s, %[b2].16b, %[a1a].4b[3]\n"
+
+                        "bne    1b\n"
+
+                        "udot    v8.4s , %[b0].16b, %[a0].4b[0]\n"
+                        "udot    v9.4s , %[b0].16b, %[a0].4b[1]\n"
+                        "ldr    %q[b2], [%[b_ptr], #32]\n"
+                        "udot    v10.4s, %[b0].16b, %[a0].4b[2]\n"
+                        "udot    v11.4s, %[b0].16b, %[a0].4b[3]\n"
+                        "ldr    %q[a0a], [%[a_ptr], #32]\n"
+                        "udot    v12.4s, %[b0].16b, %[a1].4b[0]\n"
+                        "udot    v13.4s, %[b0].16b, %[a1].4b[1]\n"
+                        "ldr    %q[a1a], [%[a_ptr], #48]\n"
+                        "udot    v14.4s, %[b0].16b, %[a1].4b[2]\n"
+                        "udot    v15.4s, %[b0].16b, %[a1].4b[3]\n"
+                        "ldr    %q[b0], [%[b_ptr], #48]\n"
+
+                        "udot    v16.4s, %[b1].16b, %[a0].4b[0]\n"
+                        "udot    v17.4s, %[b1].16b, %[a0].4b[1]\n"
+                        "udot    v18.4s, %[b1].16b, %[a0].4b[2]\n"
+                        "udot    v19.4s, %[b1].16b, %[a0].4b[3]\n"
+                        "udot    v20.4s, %[b1].16b, %[a1].4b[0]\n"
+                        "udot    v21.4s, %[b1].16b, %[a1].4b[1]\n"
+                        "udot    v22.4s, %[b1].16b, %[a1].4b[2]\n"
+                        "udot    v23.4s, %[b1].16b, %[a1].4b[3]\n"
+                        "ldr    %q[b1], [%[b_ptr], #64]\n"
+
+                        "udot    v24.4s, %[b2].16b, %[a0].4b[0]\n"
+                        "udot    v25.4s, %[b2].16b, %[a0].4b[1]\n"
+                        "udot    v26.4s, %[b2].16b, %[a0].4b[2]\n"
+                        "udot    v27.4s, %[b2].16b, %[a0].4b[3]\n"
+                        "udot    v28.4s, %[b2].16b, %[a1].4b[0]\n"
+                        "udot    v29.4s, %[b2].16b, %[a1].4b[1]\n"
+                        "udot    v30.4s, %[b2].16b, %[a1].4b[2]\n"
+                        "udot    v31.4s, %[b2].16b, %[a1].4b[3]\n"
+                        "ldr    %q[b2], [%[b_ptr], #80]\n"
+
+                        "udot    v8.4s , %[b0].16b, %[a0a].4b[0]\n" ASM_PREFETCH("[%[c_ptr0]]") "udot    v9.4s , %[b0].16b, %[a0a].4b[1]\n" ASM_PREFETCH("[%[c_ptr1]]") "udot    v10.4s, %[b0].16b, %[a0a].4b[2]\n"
+                        ASM_PREFETCH("[%[c_ptr2]]") "udot    v11.4s, %[b0].16b, %[a0a].4b[3]\n" ASM_PREFETCH("[%[c_ptr3]]") "udot    v12.4s, %[b0].16b, %[a1a].4b[0]\n" ASM_PREFETCH("[%[c_ptr4]]")
+                        "udot    v13.4s, %[b0].16b, %[a1a].4b[1]\n" ASM_PREFETCH("[%[c_ptr5]]") "udot    v14.4s, %[b0].16b, %[a1a].4b[2]\n" ASM_PREFETCH("[%[c_ptr6]]") "udot    v15.4s, %[b0].16b, %[a1a].4b[3]\n"
+                        ASM_PREFETCH("[%[c_ptr7]]")
+
+                        "udot    v16.4s, %[b1].16b, %[a0a].4b[0]\n" ASM_PREFETCH("[%[c_ptr0], #48]") "udot    v17.4s, %[b1].16b, %[a0a].4b[1]\n" ASM_PREFETCH("[%[c_ptr1], #48]") "udot    v18.4s, %[b1].16b, %[a0a].4b[2]\n"
+                        ASM_PREFETCH("[%[c_ptr2], #48]") "udot    v19.4s, %[b1].16b, %[a0a].4b[3]\n" ASM_PREFETCH("[%[c_ptr3], #48]") "udot    v20.4s, %[b1].16b, %[a1a].4b[0]\n" ASM_PREFETCH("[%[c_ptr4], #48]")
+                        "udot    v21.4s, %[b1].16b, %[a1a].4b[1]\n" ASM_PREFETCH("[%[c_ptr5], #48]") "udot    v22.4s, %[b1].16b, %[a1a].4b[2]\n" ASM_PREFETCH("[%[c_ptr6], #48]") "udot    v23.4s, %[b1].16b, %[a1a].4b[3]\n"
+                        ASM_PREFETCH("[%[c_ptr7], #48]")
+
+                        "udot    v24.4s, %[b2].16b, %[a0a].4b[0]\n"
+                        "udot    v25.4s, %[b2].16b, %[a0a].4b[1]\n"
+                        "udot    v26.4s, %[b2].16b, %[a0a].4b[2]\n"
+                        "udot    v27.4s, %[b2].16b, %[a0a].4b[3]\n"
+                        "add    %[b_ptr], %[b_ptr], #96\n"
+                        "udot    v28.4s, %[b2].16b, %[a1a].4b[0]\n"
+                        "udot    v29.4s, %[b2].16b, %[a1a].4b[1]\n"
+                        "udot    v30.4s, %[b2].16b, %[a1a].4b[2]\n"
+                        "udot    v31.4s, %[b2].16b, %[a1a].4b[3]\n"
+
+                        // Clean up macro namespace
+                        ".purgem udot\n"
+
+                        :
+                        [a_ptr] "+r"(a_ptr), [b_ptr] "+r"(b_ptr),
+                        [a0] "+w"(a0), [a1] "+w"(a1), [a0a] "+w"(a0a), [a1a] "+w"(a1a),
+                        [b0] "+w"(b0), [b1] "+w"(b1), [b2] "+w"(b2), [k] "+r"(k)
+                        : [c_ptr0] "r"(c_ptr0), [c_ptr1] "r"(c_ptr1), [c_ptr2] "r"(c_ptr2), [c_ptr3] "r"(c_ptr3),
+                        [c_ptr4] "r"(c_ptr4), [c_ptr5] "r"(c_ptr5), [c_ptr6] "r"(c_ptr6), [c_ptr7] "r"(c_ptr7)
+                        : "x20", "x21");
+
+                    stincpld(r00, r10, r20, r30, r40, r50, r60, r70, c_ptr0, c_ptr1, c_ptr2, c_ptr3, c_ptr4, c_ptr5, c_ptr6, c_ptr7);
+                    stinc(r01, r11, r21, r31, r41, r51, r61, r71, c_ptr0, c_ptr1, c_ptr2, c_ptr3, c_ptr4, c_ptr5, c_ptr6, c_ptr7);
+                    stinc(r02, r12, r22, r32, r42, r52, r62, r72, c_ptr0, c_ptr1, c_ptr2, c_ptr3, c_ptr4, c_ptr5, c_ptr6, c_ptr7);
+                }
+            }
+        }
+    }
+}
+} // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NEGEMMLowp.cpp b/src/runtime/NEON/functions/NEGEMMLowp.cpp
index 7413b28..90e47ce 100644
--- a/src/runtime/NEON/functions/NEGEMMLowp.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowp.cpp
@@ -26,28 +26,100 @@
 #include "arm_compute/core/Error.h"
 #include "arm_compute/core/Helpers.h"
 #include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h"
 #include "arm_compute/core/TensorInfo.h"
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/runtime/NEON/NEScheduler.h"
 #include "arm_compute/runtime/TensorAllocator.h"
+#include "support/ToolchainSupport.h"
 
 using namespace arm_compute;
 
+#define NEGEMMLOWP_VALIDATE_DIMENSIONS(a, b, output)                                                                                                                                      \
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN((a), 1, DataType::U8);                                                                                                                  \
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN((b), 1, DataType::U8);                                                                                                                  \
+    ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(0) != (b)->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B"); \
+    ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(1) != (output)->info()->dimension(1), "The C matrix must have the same number of rows as the matrix A");                              \
+    ARM_COMPUTE_ERROR_ON_MSG((b)->info()->dimension(0) != (output)->info()->dimension(0), "The C matrix must have the same number of columns as the matrix B");
+
 NEGEMMLowp::NEGEMMLowp(std::shared_ptr<IMemoryManager> memory_manager)
-    : _memory_group(std::move(memory_manager)), _interleave_kernel(), _transpose_kernel(), _mm_kernel(), _tmp_a(), _tmp_b()
+    : _memory_group(std::move(memory_manager)), _interleave_kernel(), _transpose_kernel(), _mm_kernel(), _mm_optimised_kernel(nullptr), _interleave_blocked(), _interleave_blocked_transposed(), _tmp_a(),
+      _tmp_b()
 {
 }
 
+void NEGEMMLowp::configure(const ITensor *a, const ITensor *b, ITensor *output)
+{
+    NEGEMMLOWP_VALIDATE_DIMENSIONS(a, b, output);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32);
+
+    const struct CPUInfo ci              = NEScheduler::get().cpu_info();
+    const int            cpu_has_dotprod = static_cast<int>(ci.CPU) & static_cast<int>(CPUTarget::DOT);
+    if(cpu_has_dotprod != 0)
+    {
+#if defined(__aarch64__)
+        // NEGEMMLowpAArch64V8P4Kernel is only compiled for AArch64 targets
+        _mm_optimised_kernel    = support::cpp14::make_unique<NEGEMMLowpAArch64V8P4Kernel>();
+        TensorShape shape_a_int = a->info()->tensor_shape();
+        shape_a_int.set(0, a->info()->dimension(0) * 8.f);
+        shape_a_int.set(1, std::ceil(a->info()->dimension(1) / 8.f));
+
+        TensorShape shape_b_int = b->info()->tensor_shape();
+        shape_b_int.set(0, b->info()->dimension(0) * 12.f);
+        shape_b_int.set(1, std::ceil(b->info()->dimension(1) / 12.f));
+
+        TensorInfo info_a_int(shape_a_int, 1, a->info()->data_type());
+        TensorInfo info_b_int(shape_b_int, 1, b->info()->data_type());
+        _tmp_a.allocator()->init(info_a_int);
+        _tmp_b.allocator()->init(info_b_int);
+
+        _memory_group.manage(&_tmp_a);
+        _memory_group.manage(&_tmp_b);
+
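+        // A is interleaved in 8x4 blocks and B in transposed 12x4 blocks: the assembly kernel works on
+        // 8x12 output tiles and each dot product instruction consumes 4 values along K.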
+        _interleave_blocked.configure(a, &_tmp_a, 8, 4, false);
+        _interleave_blocked_transposed.configure(b, &_tmp_b, 12, 4, true);
+        _mm_optimised_kernel->configure(&_tmp_a, &_tmp_b, output);
+
+        _tmp_a.allocator()->allocate();
+        _tmp_b.allocator()->allocate();
+#endif /* defined(__aarch64__) */
+    }
+    else
+    {
+        ARM_COMPUTE_ERROR("Not implemented");
+        // This is in the process of being updated, for more info please refer to COMPMID-624.
+    }
+}
+
+void NEGEMMLowp::run()
+{
+    _memory_group.acquire();
+
+    if(_mm_optimised_kernel != nullptr)
+    {
+        NEScheduler::get().schedule(&_interleave_blocked, Window::DimY);
+        NEScheduler::get().schedule(&_interleave_blocked_transposed, Window::DimY);
+        NEScheduler::get().schedule(_mm_optimised_kernel.get(), Window::DimY);
+    }
+    else
+    {
+        /* Run interleave kernel */
+        NEScheduler::get().schedule(&_interleave_kernel, Window::DimY);
+        /* Run transpose kernel */
+        NEScheduler::get().schedule(&_transpose_kernel, Window::DimY);
+        /* Run matrix multiply kernel */
+        NEScheduler::get().schedule(&_mm_kernel, Window::DimY);
+    }
+
+    _memory_group.release();
+}
+
 void NEGEMMLowp::configure(const ITensor *a, const ITensor *b, ITensor *output, int32_t a_offset, int32_t b_offset, int32_t output_offset, int32_t output_mult_int, int32_t shift)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::U8);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(b, 1, DataType::U8);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
+    NEGEMMLOWP_VALIDATE_DIMENSIONS(a, b, output);
     ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b, output);
-    ARM_COMPUTE_ERROR_ON_MSG(a->info()->dimension(0) != b->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
-    ARM_COMPUTE_ERROR_ON_MSG(a->info()->dimension(1) != output->info()->dimension(1), "The C matrix must have the same number of rows as the matrix A");
-    ARM_COMPUTE_ERROR_ON_MSG(b->info()->dimension(0) != output->info()->dimension(0), "The C matrix must have the same number of columns as the matrix C");
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
 
     /* The interleaved output matrix will have the following shape: [ a_height * 4, ceil(a_width / 4.0f) ] */
     TensorShape shape_tmp_a = a->info()->tensor_shape();
@@ -75,18 +147,4 @@
     _tmp_b.allocator()->allocate();
 }
 
-void NEGEMMLowp::run()
-{
-    _memory_group.acquire();
-
-    /* Run interleave kernel */
-    NEScheduler::get().schedule(&_interleave_kernel, Window::DimY);
-
-    /* Run transpose kernel */
-    NEScheduler::get().schedule(&_transpose_kernel, Window::DimY);
-
-    /* Run matrix multiply kernel */
-    NEScheduler::get().schedule(&_mm_kernel, Window::DimY);
-
-    _memory_group.release();
-}
+#undef NEGEMMLOWP_VALIDATE_DIMENSIONS
diff --git a/tests/NEON/Helper.h b/tests/NEON/Helper.h
index 4efab17..8bd11cc 100644
--- a/tests/NEON/Helper.h
+++ b/tests/NEON/Helper.h
@@ -25,6 +25,8 @@
 #define __ARM_COMPUTE_TEST_NEON_HELPER_H__
 
 #include "arm_compute/runtime/Array.h"
+#include "arm_compute/runtime/NEON/INESimpleFunction.h"
+#include "support/ToolchainSupport.h"
 #include "tests/Globals.h"
 
 #include <algorithm>
@@ -48,6 +50,20 @@
     }
 }
 
+// This template synthesizes an INESimpleFunction that runs the given kernel K
+template <typename K>
+class NESynthetizeFunction : public INESimpleFunction
+{
+public:
+    template <typename... Args>
+    void configure(Args &&... args)
+    {
+        auto k = arm_compute::support::cpp14::make_unique<K>();
+        k->configure(std::forward<Args>(args)...);
+        _kernel = std::move(k);
+    }
+};
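+
+// Illustrative usage (a sketch; everything except NESynthetizeFunction and the kernel name is assumed):
+//
+//   using NEInterleaveBlocked = NESynthetizeFunction<NEGEMMInterleaveBlockedKernel>;
+//   NEInterleaveBlocked interleave;
+//   interleave.configure(&src, &dst, 8 /* int_by */, 4 /* block */, false /* transposed */);
+//   interleave.run();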
+
 } // namespace test
 } // namespace arm_compute
 #endif /* __ARM_COMPUTE_TEST_NEON_HELPER_H__ */
diff --git a/tests/benchmark/NEON/GEMMLowp.cpp b/tests/benchmark/NEON/GEMMLowp.cpp
new file mode 100644
index 0000000..8cf1433
--- /dev/null
+++ b/tests/benchmark/NEON/GEMMLowp.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEGEMMLowp.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/benchmark/fixtures/GEMMLowpFixture.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "utils/TypePrinter.h"
+
+#include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h"
+#include "tests/NEON/Helper.h"
+
+namespace arm_compute
+{
+namespace test
+{
+const auto data_int_blk = framework::dataset::make("M", 800) * framework::dataset::make("N", 800) * framework::dataset::make("by", 8, 13) * framework::dataset::make("block", 4, 9);
+
+TEST_SUITE(NEON)
+
+TEST_SUITE(INTERLEAVE_BLOCKED)
+using NEInterleaveBlocked            = NESynthetizeFunction<NEGEMMInterleaveBlockedKernel>;
+using NEGEMMInterleaveBlockedFixture = GEMMInterleaveBlockedFixture<Tensor, NEInterleaveBlocked, Accessor>;
+REGISTER_FIXTURE_DATA_TEST_CASE(InterleaveBlocked, NEGEMMInterleaveBlockedFixture, framework::DatasetMode::ALL, data_int_blk);
+TEST_SUITE_END()
+
+#if 0  //FIXME: enable when we update NEGEMMLowp interface to work without offsets
+TEST_SUITE(U32)
+using NEGEMMLowpFixture = GEMMLowpFixture<Tensor, NEGEMMLowp, Accessor>;
+REGISTER_FIXTURE_DATA_TEST_CASE(GEMMLowp, NEGEMMLowpFixture, framework::DatasetMode::ALL, framework::dataset::make("M", 100, 120) * framework::dataset::make("N", 100,
+                                110)
+                                * framework::dataset::make("K", 16, 20));
+
+TEST_SUITE_END()
+#endif // 0 - FIXME: enable when we update NEGEMMLowp interface to work without offsets
+
+TEST_SUITE_END()
+
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/benchmark/fixtures/GEMMLowpFixture.h b/tests/benchmark/fixtures/GEMMLowpFixture.h
new file mode 100644
index 0000000..b640705
--- /dev/null
+++ b/tests/benchmark/fixtures/GEMMLowpFixture.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_GEMMFIXTURE
+#define ARM_COMPUTE_TEST_GEMMFIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "tests/Globals.h"
+#include "tests/Utils.h"
+#include "tests/framework/Fixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+template <typename TensorType, typename Function, typename Accessor, bool Transposed = false>
+class GEMMInterleaveBlockedFixture : public framework::Fixture
+{
+public:
+    template <typename...>
+    void setup(size_t x, size_t y, int int_by, int block)
+    {
+        const float       interleave_by_f32 = int_by;
+        const TensorShape shape_a(x, y);
+        const TensorShape shape_b(static_cast<size_t>(x * interleave_by_f32), static_cast<size_t>(std::ceil(y / interleave_by_f32)));
+        // Create tensors
+        a = create_tensor<TensorType>(shape_a, DataType::U8, 1);
+        b = create_tensor<TensorType>(shape_b, DataType::U8, 1);
+
+        // Create and configure function
+        f.configure(&a, &b, int_by, block, Transposed);
+
+        // Allocate tensors
+        a.allocator()->allocate();
+        b.allocator()->allocate();
+    }
+    void run()
+    {
+        f.run();
+    }
+
+    void teardown()
+    {
+        a.allocator()->free();
+        b.allocator()->free();
+    }
+
+private:
+    TensorType a{};
+    TensorType b{};
+    Function   f{};
+};
+
+/** Fixture that can be used for NEON and CL */
+template <typename TensorType, typename Function, typename Accessor>
+class GEMMLowpFixture : public framework::Fixture
+{
+public:
+    template <typename...>
+    void setup(size_t m, size_t n, size_t k)
+    {
+        const TensorShape shape_a(k, m);
+        const TensorShape shape_b(n, k);
+        const TensorShape shape_c(n, m);
+        // Create tensors
+        a = create_tensor<TensorType>(shape_a, DataType::U8, 1);
+        b = create_tensor<TensorType>(shape_b, DataType::U8, 1);
+        c = create_tensor<TensorType>(shape_c, DataType::U32, 1);
+
+        // Create and configure function
+        gemmlowp.configure(&a, &b, &c);
+
+        // Allocate tensors
+        a.allocator()->allocate();
+        b.allocator()->allocate();
+        c.allocator()->allocate();
+
+        // Fill tensors
+        library->fill_tensor_uniform(Accessor(a), 0);
+        library->fill_tensor_uniform(Accessor(b), 1);
+        library->fill_tensor_uniform(Accessor(c), 2);
+    }
+    void run()
+    {
+        gemmlowp.run();
+    }
+
+    void teardown()
+    {
+        a.allocator()->free();
+        b.allocator()->free();
+        c.allocator()->free();
+    }
+
+private:
+    TensorType a{};
+    TensorType b{};
+    TensorType c{};
+    Function   gemmlowp{};
+};
+
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_GEMMFIXTURE */
diff --git a/tests/validation/CPP/GEMMInterleaveBlocked.h b/tests/validation/CPP/GEMMInterleaveBlocked.h
new file mode 100644
index 0000000..ff5a0d6
--- /dev/null
+++ b/tests/validation/CPP/GEMMInterleaveBlocked.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "GEMM.h"
+
+#include "arm_compute/core/Types.h"
+#include "tests/validation/FixedPoint.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
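+// Returns t(y, x) when the coordinates fall inside the tensor and 0 otherwise, so that partial
+// blocks at the matrix borders are zero-padded.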
+template <typename T>
+T safe_read(const SimpleTensor<T> &t, int y, int x)
+{
+    const int stride = t.shape().x();
+    const int M      = t.shape().y();
+    const int N      = t.shape().x();
+    if((y < M) && (x < N))
+    {
+        return t[y * stride + x];
+    }
+    return 0;
+}
+
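+/** Reference implementation of the blocked interleave.
+ *
+ * Illustrative example (values assumed): with int_by = 4 and block = 4, output row y packs input rows
+ * 4*y .. 4*y+3, copying 4 consecutive values from each of those rows in turn; out-of-range reads are
+ * zero-filled via safe_read(). When transposed is true, the input is read with row/column swapped.
+ */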
+template <typename T>
+SimpleTensor<T> gemm_interleave_blocked(const SimpleTensor<T> &in, SimpleTensor<T> &out, int int_by, int block, bool transposed)
+{
+    const int M = out.shape().y();
+    const int N = out.shape().x();
+    for(int y = 0; y < M; y++)
+    {
+        T *out_ptr = &out[y * N];
+        for(int x = 0; x < (N / int_by); x += block)
+        {
+            for(int z = 0; z < int_by; z++)
+            {
+                for(int a = 0; (out_ptr <= &out[y * N + (N - 1)]) && a < block; a++)
+                {
+                    if(!transposed)
+                    {
+                        *out_ptr++ = safe_read(in, (y * int_by) + z, x + a);
+                    }
+                    else
+                    {
+                        const T value = safe_read(in, x + a, (y * int_by) + z);
+                        *out_ptr++    = value;
+                    }
+                }
+            }
+        }
+    }
+    return out;
+}
+
+template SimpleTensor<uint8_t> gemm_interleave_blocked(const SimpleTensor<uint8_t> &in, SimpleTensor<uint8_t> &out, int int_by, int block, bool transposed);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/CPP/GEMMLowp.cpp b/tests/validation/CPP/GEMMLowp.cpp
index d172a77..06926e6 100644
--- a/tests/validation/CPP/GEMMLowp.cpp
+++ b/tests/validation/CPP/GEMMLowp.cpp
@@ -34,6 +34,42 @@
 {
 namespace reference
 {
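+/** Reference GEMMLowp without quantization offsets: c (U32) = a (U8) * b (U8), accumulated in 32 bits.
+ *
+ * Per-element sketch: c[i][j] = sum over k of a[i][k] * b[k][j].
+ */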
+SimpleTensor<uint32_t> gemmlowp(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b, SimpleTensor<uint32_t> &c)
+{
+    const int            K       = a.shape().x();
+    const int            b_width = b.shape().x();
+    const int            rows    = c.shape().y(); //M
+    const int            cols    = c.shape().x(); //N
+    std::vector<int32_t> acc;
+    acc.resize(cols);
+    for(int i = 0; i < rows; ++i)
+    {
+        for(int j = 0; j < cols; ++j)
+        {
+            acc[j] = 0;
+        }
+        for(int k = 0; k < K; ++k)
+        {
+            auto tmp_a = static_cast<int32_t>(a[k + i * K]);
+            for(int j = 0; j < b_width; ++j)
+            {
+                auto          tmp_b       = static_cast<int32_t>(b[j + k * b_width]);
+                const int32_t mult_as_int = tmp_a * tmp_b;
+                acc[j] += mult_as_int;
+            }
+        }
+        for(int j = 0; j < cols; ++j)
+        {
+            c[j + i * cols] = acc[j];
+        }
+    }
+
+    return c;
+}
+
 template <typename T>
 SimpleTensor<T> gemmlowp(const SimpleTensor<T> &a, const SimpleTensor<T> &b, SimpleTensor<T> &c,
                          int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift)
diff --git a/tests/validation/CPP/GEMMLowp.h b/tests/validation/CPP/GEMMLowp.h
index 2160975..0428e9e 100644
--- a/tests/validation/CPP/GEMMLowp.h
+++ b/tests/validation/CPP/GEMMLowp.h
@@ -35,6 +35,8 @@
 {
 namespace reference
 {
+SimpleTensor<uint32_t> gemmlowp(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b, SimpleTensor<uint32_t> &c);
+
 template <typename T>
 SimpleTensor<T> gemmlowp(const SimpleTensor<T> &a, const SimpleTensor<T> &b, SimpleTensor<T> &c,
                          int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift);
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index 3d83f80..045d334 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -30,8 +30,12 @@
 #include "tests/framework/Macros.h"
 #include "tests/framework/datasets/Datasets.h"
 #include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/GEMMInterleaveBlockedFixture.h"
 #include "tests/validation/fixtures/GEMMLowpFixture.h"
 
+#include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h"
+#include "tests/NEON/Helper.h"
+
 namespace arm_compute
 {
 namespace test
@@ -42,17 +46,44 @@
 {
 constexpr AbsoluteTolerance<float> tolerance_f(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */
 
-const auto data_mnk     = framework::dataset::make("M", 8, 12) * framework::dataset::make("N", 8, 12) * framework::dataset::make("K", 8, 12);
+const auto data_mnk     = framework::dataset::make("M", 12, 20) * framework::dataset::make("N", 12, 20) * framework::dataset::make("K", 12, 15);
 const auto data_offsets = framework::dataset::make("a", -3, 3) * framework::dataset::make("b", -1, 2) * framework::dataset::make("c", 1, 3) * framework::dataset::make("cm", 0,
                           3)
                           * framework::dataset::make("shift", 0, 4);
 
+const auto data_int_blk = framework::dataset::make("M", 8, 12) * framework::dataset::make("N", 8, 12) * framework::dataset::make("by", 8, 13) * framework::dataset::make("block", 4, 9);
+
+const auto data_int_blk_tr = framework::dataset::make("M", 8, 17) * framework::dataset::make("N", 8, 14) * framework::dataset::make("by", 12) * framework::dataset::make("block", 4);
+
 } // namespace
 
 TEST_SUITE(NEON)
 TEST_SUITE(GEMMLowp)
 
 TEST_SUITE(U8)
+
+TEST_SUITE(INTERLEAVE_BLOCKED)
+
+using NEInterleaveBlocked            = NESynthetizeFunction<NEGEMMInterleaveBlockedKernel>;
+using NEGEMMInterleaveBlockedFixture = GEMMInterleaveBlockedValidationFixture<Tensor, Accessor, NEInterleaveBlocked>;
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMInterleaveBlockedFixture, framework::DatasetMode::PRECOMMIT, data_int_blk)
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_f);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(INTERLEAVE_BLOCKED_TRANSPOSED)
+using NEInterleaveBlockedTransposed            = NESynthetizeFunction<NEGEMMInterleaveBlockedKernel>;
+using NEGEMMInterleaveBlockedTransposedFixture = GEMMInterleaveBlockedValidationFixture<Tensor, Accessor, NEInterleaveBlockedTransposed, true>;
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMInterleaveBlockedTransposedFixture, framework::DatasetMode::PRECOMMIT, data_int_blk_tr)
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_f);
+}
+
+TEST_SUITE_END()
+
 using NEGEMMLowpOffsetFixture = GEMMLowpOffsetValidationFixture<Tensor, Accessor, NEGEMMLowp>;
 FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpOffsetFixture, framework::DatasetMode::PRECOMMIT, data_mnk *data_offsets)
 {
@@ -61,6 +92,17 @@
 }
 TEST_SUITE_END()
 
+#if defined(__aarch64__)
+TEST_SUITE(U32)
+using NEGEMMLowpFixture = GEMMLowpValidationFixture<Tensor, Accessor, NEGEMMLowp>;
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpFixture, framework::DatasetMode::PRECOMMIT, framework::dataset::make("M", 12, 20) * framework::dataset::make("N", 12, 20) * framework::dataset::make("K",
+                       16))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_f);
+}
+TEST_SUITE_END()
+#endif // defined(__aarch64__)
 TEST_SUITE_END()
 TEST_SUITE_END()
 } // namespace validation
diff --git a/tests/validation/fixtures/GEMMInterleaveBlockedFixture.h b/tests/validation/fixtures/GEMMInterleaveBlockedFixture.h
new file mode 100644
index 0000000..89c188f
--- /dev/null
+++ b/tests/validation/fixtures/GEMMInterleaveBlockedFixture.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_GEMM_INTERLEAVE_BLOCKED_FIXTURE
+#define ARM_COMPUTE_TEST_GEMM_INTERLEAVE_BLOCKED_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/CPP/GEMMInterleaveBlocked.h"
+#include "tests/validation/Helpers.h"
+
+#include <random>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, bool Transposed = false>
+class GEMMInterleaveBlockedValidationFixture : public framework::Fixture
+{
+public:
+    template <typename...>
+    void setup(size_t x, size_t y, int int_by, int block)
+    {
+        const float       interleave_by_f32 = int_by;
+        const TensorShape shape_a(x, y);
+        const TensorShape shape_b(static_cast<size_t>(x * interleave_by_f32), static_cast<size_t>(std::ceil(y / interleave_by_f32)));
+        _target    = compute_target(shape_a, shape_b, int_by, block);
+        _reference = compute_reference(shape_a, shape_b, int_by, block);
+    }
+
+protected:
+    template <typename U>
+    void fill(U &&tensor, int i)
+    {
+        ARM_COMPUTE_ERROR_ON(tensor.data_type() != DataType::U8);
+        std::uniform_int_distribution<> distribution(0, 255);
+        library->fill(tensor, distribution, i);
+    }
+
+    TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, int int_by, int block)
+    {
+        // Create tensors
+        TensorType a = create_tensor<TensorType>(shape_a, DataType::U8, 1);
+        TensorType b = create_tensor<TensorType>(shape_b, DataType::U8, 1);
+
+        // Create and configure function
+        FunctionType f;
+        f.configure(&a, &b, int_by, block, Transposed);
+
+        ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Allocate tensors
+        a.allocator()->allocate();
+        b.allocator()->allocate();
+
+        ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Fill tensors
+        fill(AccessorType(a), 0);
+
+        // Compute the blocked interleave function
+        f.run();
+        return b;
+    }
+
+    SimpleTensor<uint8_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, int int_by, int block)
+    {
+        // Create reference
+        SimpleTensor<uint8_t> a{ shape_a, DataType::U8, 1 };
+        SimpleTensor<uint8_t> b{ shape_b, DataType::U8, 1 };
+
+        // Fill reference
+        fill(a, 0);
+        return reference::gemm_interleave_blocked<uint8_t>(a, b, int_by, block, Transposed);
+    }
+
+    TensorType            _target{};
+    SimpleTensor<uint8_t> _reference{};
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_GEMM_INTERLEAVE_BLOCKED_FIXTURE */
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index c972469..556b6c4 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -120,6 +120,81 @@
     SimpleTensor<uint8_t> _reference{};
 };
 
+template <typename TensorType, typename AccessorType, typename FunctionType>
+class GEMMLowpValidationFixture : public framework::Fixture
+{
+public:
+    template <typename...>
+    void setup(size_t m, size_t n, size_t k)
+    {
+        const TensorShape shape_a(k, m);
+        const TensorShape shape_b(n, k);
+        const TensorShape shape_c(n, m);
+        _target    = compute_target(shape_a, shape_b, shape_c);
+        _reference = compute_reference(shape_a, shape_b, shape_c);
+    }
+
+protected:
+    template <typename U>
+    void fill(U &&tensor, int i, int lo, int hi)
+    {
+        std::uniform_int_distribution<> distribution(lo, hi);
+        library->fill(tensor, distribution, i);
+    }
+
+    TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c)
+    {
+        // Create tensors
+        TensorType a = create_tensor<TensorType>(shape_a, DataType::U8, 1);
+        TensorType b = create_tensor<TensorType>(shape_b, DataType::U8, 1);
+        TensorType c = create_tensor<TensorType>(shape_c, DataType::U32, 1);
+
+        // Create and configure function
+        FunctionType gemmlowp;
+        gemmlowp.configure(&a, &b, &c);
+
+        ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Allocate tensors
+        a.allocator()->allocate();
+        b.allocator()->allocate();
+        c.allocator()->allocate();
+
+        ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Fill tensors
+        fill(AccessorType(a), 0, 0, 3);
+        fill(AccessorType(b), 1, 0, 3);
+        fill(AccessorType(c), 2, 0, 0);
+
+        // Compute GEMM function
+        gemmlowp.run();
+        return c;
+    }
+
+    SimpleTensor<uint32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c)
+    {
+        // Create reference
+        SimpleTensor<uint8_t>  a{ shape_a, DataType::U8, 1 };
+        SimpleTensor<uint8_t>  b{ shape_b, DataType::U8, 1 };
+        SimpleTensor<uint32_t> c{ shape_c, DataType::U32, 1 };
+
+        // Fill reference
+        fill(a, 0, 0, 3);
+        fill(b, 1, 0, 3);
+        fill(c, 2, 0, 0);
+
+        return reference::gemmlowp(a, b, c);
+    }
+
+    TensorType             _target{};
+    SimpleTensor<uint32_t> _reference{};
+};
+
 } // namespace validation
 } // namespace test
 } // namespace arm_compute