Port NEGEMM to memory injecting interface (Part 1)

- Start porting NEGEMM to the new API
- Port NEGEMMInterleave4x4Kernel to the new API
- Port NEGEMMMatrixAdditionKernel to the new API
- Port NEGEMMTranspose1xWKernel to the new API
- Remove padding from NEGEMMMatrixAdditionKernel
- Remove unused INESimpleKernel and ICPPSimpleKernel

Partially resolves: COMPMID-4402

Change-Id: I63edadddfe00a54586e5384d6a0211db25ae9042
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5857
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
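For context, the "memory injecting" interface works roughly as follows: ported kernels are configured against ITensorInfo descriptors only, and the backing memory is supplied at execution time through an ITensorPack. A minimal sketch, assuming the configure/schedule_op pattern used by other ported operators (the pack IDs and split hint below are illustrative, not taken from this patch):

```cpp
// Illustrative only: configure a ported kernel from tensor *info*, then inject
// the actual tensor memory at execution time via an ITensorPack.
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "arm_compute/runtime/Tensor.h"
#include "src/core/cpu/kernels/CpuGemmTranspose1xWKernel.h"

using namespace arm_compute;

void run_transpose_1xw(const Tensor &src, Tensor &dst)
{
    cpu::kernels::CpuGemmTranspose1xWKernel kernel;
    kernel.configure(src.info(), dst.info()); // no tensor pointers captured here

    // Memory is "injected" only now, at run time:
    ITensorPack pack{ { TensorType::ACL_SRC, &src }, { TensorType::ACL_DST, &dst } };
    NEScheduler::get().schedule_op(&kernel, Window::DimY, kernel.window(), pack);
}
```

This separation is what lets a function hold no tensors of its own and hand workspace memory to its kernels on every run.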
diff --git a/Android.bp b/Android.bp
index a4e3d94..5b9f4e0 100644
--- a/Android.bp
+++ b/Android.bp
@@ -133,7 +133,6 @@
         "src/core/CL/kernels/CLTileKernel.cpp",
         "src/core/CL/kernels/CLWeightsReshapeKernel.cpp",
         "src/core/CPP/CPPTypes.cpp",
-        "src/core/CPP/ICPPSimpleKernel.cpp",
         "src/core/CPP/kernels/CPPBoxWithNonMaximaSuppressionLimitKernel.cpp",
         "src/core/CPP/kernels/CPPNonMaximumSuppressionKernel.cpp",
         "src/core/CPP/kernels/CPPPermuteKernel.cpp",
@@ -163,14 +162,11 @@
         "src/core/NEON/kernels/NEFFTScaleKernel.cpp",
         "src/core/NEON/kernels/NEFillBorderKernel.cpp",
         "src/core/NEON/kernels/NEFuseBatchNormalizationKernel.cpp",
-        "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp",
         "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp",
         "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.cpp",
         "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.cpp",
         "src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp",
-        "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp",
         "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.cpp",
-        "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp",
         "src/core/NEON/kernels/NEGatherKernel.cpp",
         "src/core/NEON/kernels/NEGenerateProposalsLayerKernel.cpp",
         "src/core/NEON/kernels/NEIm2ColKernel.cpp",
@@ -281,10 +277,13 @@
         "src/core/cpu/kernels/CpuElementwiseUnaryKernel.cpp",
         "src/core/cpu/kernels/CpuFillKernel.cpp",
         "src/core/cpu/kernels/CpuFloorKernel.cpp",
+        "src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.cpp",
         "src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.cpp",
         "src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.cpp",
         "src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.cpp",
         "src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp",
+        "src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.cpp",
+        "src/core/cpu/kernels/CpuGemmTranspose1xWKernel.cpp",
         "src/core/cpu/kernels/CpuMulKernel.cpp",
         "src/core/cpu/kernels/CpuPermuteKernel.cpp",
         "src/core/cpu/kernels/CpuPool2dKernel.cpp",
diff --git a/arm_compute/core/CPP/ICPPSimpleKernel.h b/arm_compute/core/CPP/ICPPSimpleKernel.h
deleted file mode 100644
index c31d487..0000000
--- a/arm_compute/core/CPP/ICPPSimpleKernel.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2017-2019 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_ICPPSIMPLEKERNEL_H
-#define ARM_COMPUTE_ICPPSIMPLEKERNEL_H
-
-#include "arm_compute/core/CPP/ICPPKernel.h"
-
-namespace arm_compute
-{
-class ITensor;
-
-/** Interface for simple C++ kernels having 1 tensor input and 1 tensor output */
-class ICPPSimpleKernel : public ICPPKernel
-{
-public:
-    /** Constructor */
-    ICPPSimpleKernel();
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    ICPPSimpleKernel(const ICPPSimpleKernel &) = delete;
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    ICPPSimpleKernel &operator=(const ICPPSimpleKernel &) = delete;
-    /** Allow instances of this class to be moved */
-    ICPPSimpleKernel(ICPPSimpleKernel &&) = default;
-    /** Allow instances of this class to be moved */
-    ICPPSimpleKernel &operator=(ICPPSimpleKernel &&) = default;
-    /** Default destructor */
-    ~ICPPSimpleKernel() = default;
-
-protected:
-    /** Configure the kernel
-     *
-     * @param[in]  input                             Source tensor.
-     * @param[out] output                            Destination tensor.
-     * @param[in]  num_elems_processed_per_iteration Number of processed elements per iteration.
-     * @param[in]  border_undefined                  (Optional) True if the border mode is undefined. False if it's replicate or constant.
-     * @param[in]  border_size                       (Optional) Size of the border.
-     */
-    void configure(const ITensor *input, ITensor *output, unsigned int num_elems_processed_per_iteration, bool border_undefined = false, const BorderSize &border_size = BorderSize());
-    /** Static function to check if given info will lead to a valid configuration of @ref ICPPSimpleKernel.
-     *
-     * @param[in] input                             Source tensor info.
-     * @param[in] output                            Destination tensor info.
-     * @param[in] num_elems_processed_per_iteration Number of processed elements per iteration.
-     * @param[in] border_undefined                  (Optional) True if the border mode is undefined. False if it's replicate or constant.
-     * @param[in] border_size                       (Optional) Size of the border.
-     */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int num_elems_processed_per_iteration,
-                           bool border_undefined = false, const BorderSize &border_size = BorderSize());
-
-protected:
-    const ITensor *_input;
-    ITensor       *_output;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_ICPPSIMPLEKERNEL_H */
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 48c87cd..f6658e7 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -1750,10 +1750,10 @@
 
 /** GEMM reshape information class. This class stores the necessary information about matrix A and matrix B reshape.
  *
- * The matrix A can only be reshaped through @ref opencl::kernels::ClGemmReshapeLhsMatrixKernel or  @ref NEGEMMInterleave4x4Kernel
+ * The matrix A can only be reshaped through @ref opencl::kernels::ClGemmReshapeLhsMatrixKernel or  @ref cpu::kernels::CpuGemmInterleave4x4Kernel
  * Note: Optionally just for @ref opencl::kernels::ClGemmReshapeLhsMatrixKernel is it possible to set mult_interleave4x4_height, the multiplication factor for the height of the 4x4 interleaved block
  *
- * The matrix B can only be reshaped through @ref opencl::kernels::ClGemmReshapeRhsMatrixKernel or  @ref NEGEMMTranspose1xWKernel
+ * The matrix B can only be reshaped through @ref opencl::kernels::ClGemmReshapeRhsMatrixKernel or  @ref cpu::kernels::CpuGemmTranspose1xWKernel
  * Note: Optionally just for @ref opencl::kernels::ClGemmReshapeRhsMatrixKernel is it possible to set mult_transpose1xW_width, the multiplication factor for the width of the 1xW transposed block
  *
  */
diff --git a/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h
index f19aa80..bb4c456 100644
--- a/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h
@@ -111,7 +111,7 @@
      *                              Data types supported: Same as @p input.
      * @param[in]  conv_info        Contains padding and stride information described in @ref PadStrideInfo.
      * @param[in]  weights_info     Specifies if the weights tensor has been reshaped with NEWeightsReshapeKernel. If this is not part of the fully connected layer the weights
-     *                              tensor has also been transposed with NEGEMMTranspose1xWKernel. Data type supported: Same as @p input.
+     *                              tensor has also been transposed with cpu::kernels::CpuGemmTranspose1xWKernel. Data type supported: Same as @p input.
      * @param[in]  dilation         (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
      * @param[in]  act_info         (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
      * @param[in]  enable_fast_math (Optional) Enable fast math computation. In case this flag were set, the function could dispatch the fastest implementation
@@ -133,7 +133,7 @@
      *                             Data types supported: Same as @p input.
      * @param[in] conv_info        Contains padding and stride information described in @ref PadStrideInfo.
      * @param[in] weights_info     Specifies if the weights tensor has been reshaped with NEWeightsReshapeKernel. If this is not part of the fully connected layer the weights
-     *                             tensor has also been transposed with NEGEMMTranspose1xWKernel. Data type supported: Same as @p input.
+     *                             tensor has also been transposed with cpu::kernels::CpuGemmTranspose1xWKernel. Data type supported: Same as @p input.
      * @param[in] dilation         (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
      * @param[in] act_info         (Optional) Activation layer information in case of a fused activation.
      * @param[in] enable_fast_math (Optional) Enable fast math computation. In case this flag were set, the function could dispatch the fastest implementation
@@ -156,7 +156,7 @@
      *                             Data types supported: Same as @p input.
      * @param[in] conv_info        Contains padding and stride information described in @ref PadStrideInfo.
      * @param[in] weights_info     Specifies if the weights tensor has been reshaped with NEWeightsReshapeKernel. If this is not part of the fully connected layer the weights
-     *                             tensor has also been transposed with NEGEMMTranspose1xWKernel. Data type supported: Same as @p input.
+     *                             tensor has also been transposed with cpu::kernels::CpuGemmTranspose1xWKernel. Data type supported: Same as @p input.
      * @param[in] dilation         (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
      * @param[in] act_info         (Optional) Activation layer information in case of a fused activation.
      * @param[in] enable_fast_math (Optional) Enable fast math computation. In case this flag were set, the function could dispatch the fastest implementation
diff --git a/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h b/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
index d2cd60e..22ec9e0 100644
--- a/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
@@ -80,7 +80,7 @@
  *  -# @ref NEIm2ColKernel (called when the input comes from a convolutional layer)
  *  -# @ref NETranspose (if @p are_weights_reshaped is set to false and transpose_weights is set to true ) (called once)
  *  -# @ref NEGEMMMatrixMultiplyKernel or @ref NEGEMMLowpMatrixMultiplyCore (if quantized asymmetric)
- *  -# @ref NEGEMMMatrixAdditionKernel or @ref NEGEMMLowpOutputStage (if quantized asymmetric) (if @p biases is not equal to nullptr)
+ *  -# @ref cpu::kernels::CpuGemmMatrixAdditionKernel or @ref NEGEMMLowpOutputStage (if quantized asymmetric) (if @p biases is not equal to nullptr)
  *
  * @note  The fully connected layer accepts "weights" tensors only with 2 dimensions.
  */
diff --git a/arm_compute/runtime/NEON/functions/NEGEMM.h b/arm_compute/runtime/NEON/functions/NEGEMM.h
index 6c5be0e..c1ae11b 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMM.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMM.h
@@ -29,40 +29,26 @@
 #include "arm_compute/runtime/IMemoryManager.h"
 #include "arm_compute/runtime/IWeightsManager.h"
 #include "arm_compute/runtime/MemoryGroup.h"
-#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
-#include "arm_compute/runtime/Tensor.h"
-#include "src/core/helpers/MemoryHelpers.h"
 
 #include <memory>
 
 namespace arm_compute
 {
-// Forward declarations
-class NEGEMMInterleave4x4Kernel;
-class NEGEMMMatrixAdditionKernel;
-class NEGEMMMatrixMultiplyKernel;
-class NEGEMMTranspose1xWKernel;
-namespace cpu
-{
-class CpuGemmAssemblyDispatch;
-}
-
 /** Basic function to execute GEMM. This function calls the following kernels:
  *
  * If optimized assembly is available:
  *  -# @ref cpu::CpuGemmAssemblyDispatch
- *  -# @ref NEActivationLayer (if alpha != 1.0)
+ *  -# @ref cpu::CpuActivation (if alpha != 1.0)
  * Else:
- *  -# @ref NEGEMMInterleave4x4Kernel (if the output tensor is a matrix)
- *  -# @ref NEGEMMTranspose1xWKernel (if the output tensor is a matrix)
+ *  -# @ref cpu::kernels::CpuGemmInterleave4x4Kernel (if the output tensor is a matrix)
+ *  -# @ref cpu::kernels::CpuGemmTranspose1xWKernel (if the output tensor is a matrix)
  *  -# @ref NEGEMMMatrixMultiplyKernel
  * In both cases:
- *  -# @ref NEGEMMMatrixAdditionKernel (if c != nullptr and beta != 0.0 and is not reshaped once)
+ *  -# @ref cpu::kernels::CpuGemmMatrixAdditionKernel (if c != nullptr and beta != 0.0 and is not reshaped once)
  * Else:
- *  -# @ref NEArithmeticAddition (if c != nullptr and is reshaped once and not optimized assembly in place)
+ *  -# @ref cpu::CpuAdd (if c != nullptr and is reshaped once and not optimized assembly in place)
  *
- *  -# @ref NEActivationLayer (if activation is specified in GEMMInfo)
+ *  -# @ref cpu::CpuActivation (if activation is specified in GEMMInfo)
  */
 class NEGEMM : public IFunction
 {
@@ -117,33 +103,8 @@
     void prepare() override;
 
 private:
-    MemoryGroup                                   _memory_group;
-    IWeightsManager                              *_weights_manager;
-    std::unique_ptr<NEGEMMInterleave4x4Kernel>    _interleave_kernel;
-    std::unique_ptr<NEGEMMTranspose1xWKernel>     _transpose_kernel;
-    std::unique_ptr<NEGEMMMatrixMultiplyKernel>   _mm_kernel;
-    std::unique_ptr<cpu::CpuGemmAssemblyDispatch> _asm_glue;
-    std::unique_ptr<NEGEMMMatrixAdditionKernel>   _ma_kernel;
-    NEActivationLayer                             _alpha_scale_func;
-    NEArithmeticAddition                          _add_bias;
-    NEActivationLayer                             _activation_func;
-
-    Tensor         _tmp_a;
-    Tensor         _tmp_b;
-    Tensor         _tmp_d;
-    const ITensor *_original_b;
-    bool           _run_vector_matrix_multiplication;
-    bool           _run_alpha_scale;
-    bool           _run_addition;
-    bool           _run_bias_addition;
-    bool           _run_activation;
-    bool           _reshape_b_only_on_first_run;
-    bool           _is_prepared;
-
-    ITensorPack                      _asm_glue_run_pack;
-    ITensorPack                      _asm_glue_prep_pack;
-    WorkspaceData<Tensor>            _asm_glue_workspace;
-    experimental::MemoryRequirements _aux_mem_req;
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 } // namespace arm_compute
 #endif /*ARM_COMPUTE_NEGEMM_H */
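With the kernels gone from the public header, NEGEMM keeps only an opaque Impl. A hedged reconstruction of what that Impl plausibly aggregates, based on the member list removed above; the real definition lives in NEGEMM.cpp and the field names here are illustrative:

```cpp
// Sketch only: the state NEGEMM used to hold inline, now private to NEGEMM.cpp.
struct NEGEMM::Impl
{
    MemoryGroup      memory_group{};
    IWeightsManager *weights_manager{ nullptr };

    std::unique_ptr<cpu::kernels::CpuGemmInterleave4x4Kernel>  interleave_kernel{ nullptr };
    std::unique_ptr<cpu::kernels::CpuGemmTranspose1xWKernel>   transpose_kernel{ nullptr };
    std::unique_ptr<cpu::kernels::CpuGemmMatrixAdditionKernel> ma_kernel{ nullptr };
    std::unique_ptr<cpu::CpuGemmAssemblyDispatch>              asm_glue{ nullptr };

    // Workspace tensors owned by the function and injected into kernels per run
    Tensor      tmp_a{}, tmp_b{}, tmp_d{};
    ITensorPack run_pack{};
    bool        is_prepared{ false };
};
```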
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
index edb58e9..d334d51 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
@@ -203,7 +203,7 @@
      *                          Data types supported: Same as @p input.
      * @param[in]  conv_info    Contains padding and stride information described in @ref PadStrideInfo.
      * @param[in]  weights_info Specifies if the weights tensor has been reshaped with NEWeightsReshapeKernel. If this is not part of the fully connected layer the weights
-     *                          tensor has also been transposed with NEGEMMTranspose1xWKernel. Data type supported: Same as @p input.
+     *                          tensor has also been transposed with cpu::kernels::CpuGemmTranspose1xWKernel. Data type supported: Same as @p input.
      * @param[in]  dilation     (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
      * @param[in]  act_info     (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
      * @param[in]  num_groups   (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is not supported
@@ -223,7 +223,7 @@
      *                         Data types supported: Same as @p input.
      * @param[in] conv_info    Contains padding and stride information described in @ref PadStrideInfo.
      * @param[in] weights_info Specifies if the weights tensor has been reshaped with NEWeightsReshapeKernel. If this is not part of the fully connected layer the weights
-     *                         tensor has also been transposed with NEGEMMTranspose1xWKernel. Data type supported: Same as @p input.
+     *                         tensor has also been transposed with cpu::kernels::CpuGemmTranspose1xWKernel. Data type supported: Same as @p input.
      * @param[in] dilation     (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
      * @param[in] act_info     (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
      * @param[in] num_groups   (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is not supported
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
index ff88876..60cfd8f 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
@@ -35,8 +35,8 @@
 class ITensor;
 /** Basic function to execute GEMMLowpMatrixMultiplyCore. This function calls the following kernels if the DOT product instruction is not available:
  *
- *  -# @ref NEGEMMInterleave4x4Kernel
- *  -# @ref NEGEMMTranspose1xWKernel
+ *  -# @ref cpu::kernels::CpuGemmInterleave4x4Kernel
+ *  -# @ref cpu::kernels::CpuGemmTranspose1xWKernel
  *  -# @ref NEGEMMLowpMatrixMultiplyKernel
  *  -# @ref NEGEMMLowpOffsetContributionKernel
  *  -# @ref NEActivationLayer
diff --git a/arm_compute/runtime/NEON/functions/NELSTMLayer.h b/arm_compute/runtime/NEON/functions/NELSTMLayer.h
index 075fb45..4272215 100644
--- a/arm_compute/runtime/NEON/functions/NELSTMLayer.h
+++ b/arm_compute/runtime/NEON/functions/NELSTMLayer.h
@@ -25,7 +25,7 @@
 #define ARM_COMPUTE_NELSTMLAYER_H
 
 #include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
+#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
 #include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
 #include "arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h"
 #include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"
diff --git a/docs/user_guide/release_version_and_change_log.dox b/docs/user_guide/release_version_and_change_log.dox
index fd3806a..cccf5b9 100644
--- a/docs/user_guide/release_version_and_change_log.dox
+++ b/docs/user_guide/release_version_and_change_log.dox
@@ -239,12 +239,12 @@
    - NEComplexPixelWiseMultiplicationKernel
    - NENonMaximaSuppression3x3Kernel
    - @ref NERemapKernel
-   - @ref NEGEMMInterleave4x4Kernel
+   - NEGEMMInterleave4x4Kernel
    - NEDirectConvolutionLayerKernel
    - NEScaleKernel
    - NELocallyConnectedMatrixMultiplyKernel
    - @ref NEGEMMLowpOffsetContributionKernel
-   - @ref NEGEMMTranspose1xWKernel
+   - NEGEMMTranspose1xWKernel
    - NEPoolingLayerKernel
    - NEConvolutionKernel
    - NEDepthwiseConvolutionLayerNativeKernel
@@ -1020,7 +1020,7 @@
  - Added the validate method in:
     - @ref NEDepthConvertLayer
     - @ref NEFloor / @ref CLFloor
-    - @ref NEGEMMMatrixAdditionKernel
+    - NEGEMMMatrixAdditionKernel
     - @ref NEReshapeLayer / @ref CLReshapeLayer
     - @ref CLScale
  - Added new examples:
@@ -1377,7 +1377,7 @@
    - CLLaplacianPyramid, CLLaplacianReconstruct
  - New Arm® Neon™ kernels / functions:
    - NEActivationLayerKernel / @ref NEActivationLayer
-   - GEMM refactoring + FP16 support (Requires armv8.2 CPU): @ref NEGEMMInterleave4x4Kernel, @ref NEGEMMTranspose1xWKernel, @ref NEGEMMMatrixMultiplyKernel, @ref NEGEMMMatrixAdditionKernel / @ref NEGEMM
+   - GEMM refactoring + FP16 support (Requires armv8.2 CPU): NEGEMMInterleave4x4Kernel, NEGEMMTranspose1xWKernel, NEGEMMMatrixMultiplyKernel, NEGEMMMatrixAdditionKernel / @ref NEGEMM
    - NEPoolingLayerKernel / @ref NEPoolingLayer
 
 v17.02.1 Sources preview
diff --git a/filelist.json b/filelist.json
index 13c6ecb..b8a69c5 100644
--- a/filelist.json
+++ b/filelist.json
@@ -1163,10 +1163,10 @@
       "GEMM": {
         "files": {
           "kernel": [
-            "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp",
+            "src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.cpp",
             "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.cpp",
-            "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp",
-            "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp"
+            "src/core/cpu/kernels/CpuGemmTranspose1xWKernel.cpp",
+            "src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.cpp"
           ]
         }
       },
diff --git a/src/core/CPP/ICPPSimpleKernel.cpp b/src/core/CPP/ICPPSimpleKernel.cpp
deleted file mode 100644
index 9e4df5e..0000000
--- a/src/core/CPP/ICPPSimpleKernel.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2016-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/core/CPP/ICPPSimpleKernel.h"
-
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/IAccessWindow.h"
-#include "arm_compute/core/ITensor.h"
-#include "src/core/helpers/WindowHelpers.h"
-
-namespace arm_compute
-{
-namespace
-{
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, unsigned int num_elems_processed_per_iteration,
-                                                        bool border_undefined, const arm_compute::BorderSize &border_size)
-{
-    // Configure kernel window
-    Window                 win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration), border_undefined, border_size);
-    AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
-    AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
-
-    bool window_changed = update_window_and_padding(win, input_access, output_access);
-
-    output_access.set_valid_region(win, input->valid_region(), border_undefined, border_size);
-
-    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
-    return std::make_pair(err, win);
-}
-} // namespace
-
-ICPPSimpleKernel::ICPPSimpleKernel()
-    : _input{ nullptr }, _output{ nullptr }
-{
-}
-
-void ICPPSimpleKernel::configure(const ITensor *input, ITensor *output, unsigned int num_elems_processed_per_iteration, bool border_undefined, const BorderSize &border_size)
-{
-    _input  = input;
-    _output = output;
-
-    // Configure kernel window
-    auto win_config = validate_and_configure_window(input->info(), output->info(), num_elems_processed_per_iteration, border_undefined, border_size);
-    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
-    ICPPKernel::configure(win_config.second);
-}
-
-Status ICPPSimpleKernel::validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int num_elems_processed_per_iteration,
-                                  bool border_undefined, const arm_compute::BorderSize &border_size)
-{
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), num_elems_processed_per_iteration, border_undefined, border_size).first);
-    return Status{};
-}
-
-} // namespace arm_compute
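The update_window_and_padding() call deleted above is what demanded tensor padding and, when denied it, failed with "Insufficient Padding!". The ported kernels drop this machinery; a sketch of the padding-free window setup, under the assumption that vector-tail elements are handled inside run() rather than by padding the tensor:

```cpp
#include "arm_compute/core/Window.h"
#include "src/core/helpers/WindowHelpers.h"

using namespace arm_compute;

// Assumption, not the ported kernel's verbatim body: a maximal per-element
// window, so no AccessWindow objects and no update_window_and_padding() call
// that would grow the tensor's padding.
Window make_unpadded_window(const ITensorInfo &src)
{
    // Steps() means a step of 1 in every dimension; run() then processes full
    // vectors and finishes the leftover elements with a scalar loop.
    return calculate_max_window(src, Steps());
}
```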
diff --git a/src/core/NEON/INESimpleKernel.h b/src/core/NEON/INESimpleKernel.h
deleted file mode 100644
index 2986e7b..0000000
--- a/src/core/NEON/INESimpleKernel.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_INESIMPLEKERNEL_H
-#define ARM_COMPUTE_INESIMPLEKERNEL_H
-
-#include "arm_compute/core/CPP/ICPPSimpleKernel.h"
-
-namespace arm_compute
-{
-/** Interface for simple CPU kernels having 1 tensor input and 1 tensor output */
-using INESimpleKernel = ICPPSimpleKernel;
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_INESIMPLEKERNEL_H */
diff --git a/src/core/NEON/NEKernels.h b/src/core/NEON/NEKernels.h
index 268871a..0f7475c 100644
--- a/src/core/NEON/NEKernels.h
+++ b/src/core/NEON/NEKernels.h
@@ -41,14 +41,11 @@
 #include "src/core/NEON/kernels/NEFFTScaleKernel.h"
 #include "src/core/NEON/kernels/NEFillBorderKernel.h"
 #include "src/core/NEON/kernels/NEFuseBatchNormalizationKernel.h"
-#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
 #include "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
 #include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"
 #include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"
 #include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
 #include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
 #include "src/core/NEON/kernels/NEGatherKernel.h"
 #include "src/core/NEON/kernels/NEGenerateProposalsLayerKernel.h"
 #include "src/core/NEON/kernels/NEIm2ColKernel.h"
diff --git a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp b/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp
deleted file mode 100644
index 9011680..0000000
--- a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
-
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/Window.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/NEON/INEKernel.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-
-#include <arm_neon.h>
-#include <cstddef>
-#include <cstdint>
-#include <tuple>
-
-using namespace arm_compute;
-using namespace arm_compute::misc::shape_calculator;
-
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
-{
-    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
-    //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use CPU FP16 instructions.
-    ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
-    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
-
-    if(output->total_size() != 0)
-    {
-        TensorShape output_shape = input->tensor_shape();
-        output_shape.set(0, input->dimension(0) * 4);
-        output_shape.set(1, std::ceil(input->dimension(1) / 4.0f));
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
-    }
-
-    return Status{};
-}
-} // namespace
-
-NEGEMMInterleave4x4Kernel::NEGEMMInterleave4x4Kernel()
-    : _func(nullptr)
-{
-}
-
-void NEGEMMInterleave4x4Kernel::configure(const ITensor *input, ITensor *output)
-{
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
-    // Output auto inizialitation if not yet initialized
-    auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(compute_interleaved_shape(*input->info())));
-
-    // Perform validate step
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
-
-    _input  = input;
-    _output = output;
-
-    switch(input->info()->element_size())
-    {
-        case 1:
-            _func = &NEGEMMInterleave4x4Kernel::gemm_interleave4x4<uint8_t>;
-            break;
-        case 2:
-            _func = &NEGEMMInterleave4x4Kernel::gemm_interleave4x4<uint16_t>;
-            break;
-        case 4:
-            _func = &NEGEMMInterleave4x4Kernel::gemm_interleave4x4<uint32_t>;
-            break;
-        default:
-            ARM_COMPUTE_ERROR_ON("Element size not supported");
-            break;
-    }
-
-    Window win = calculate_max_window(*input->info(), Steps(1, 4));
-
-    INEKernel::configure(win);
-}
-
-Status NEGEMMInterleave4x4Kernel::validate(const ITensorInfo *input, const ITensorInfo *output)
-{
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output));
-
-    return Status{};
-}
-
-template <typename ScalarType>
-void NEGEMMInterleave4x4Kernel::gemm_interleave4x4(const ITensor *input, ITensor *output, const Window &window)
-{
-    const size_t window_start_x = window.x().start();
-    const size_t window_end_x   = window.x().end();
-
-    const size_t in_height = input->info()->dimension(1);
-    const size_t in_stride = input->info()->strides_in_bytes()[1];
-
-    const size_t partial_y = in_height % 4;
-
-    // Set window for the input tensor
-    Window win = window;
-    win.set(Window::DimX, Window::Dimension(0, 1, 1));
-
-    // Set window for the output tensor
-    Window win_out(window);
-    win_out.set(Window::DimX, Window::Dimension(0, 1, 1));
-    win_out.scale(Window::DimY, 0.25f);
-
-    Iterator in(input, win);
-    Iterator out(output, win_out);
-
-    execute_window_loop(win, [&](const Coordinates & id)
-    {
-        if(id.y() + 4 <= static_cast<int>(in_height))
-        {
-            for(size_t x = window_start_x; x < window_end_x; ++x)
-            {
-                const ScalarType data[4] =
-                {
-                    *(reinterpret_cast<const ScalarType *>(in.ptr() + 0 * in_stride) + x),
-                    *(reinterpret_cast<const ScalarType *>(in.ptr() + 1 * in_stride) + x),
-                    *(reinterpret_cast<const ScalarType *>(in.ptr() + 2 * in_stride) + x),
-                    *(reinterpret_cast<const ScalarType *>(in.ptr() + 3 * in_stride) + x),
-                };
-                std::memcpy(out.ptr() + x * 4 * sizeof(ScalarType), data, 4 * sizeof(ScalarType));
-            }
-        }
-        else
-        {
-            for(size_t x = window_start_x; x < window_end_x; ++x)
-            {
-                ScalarType data[4] = { 0, 0, 0, 0 };
-
-                for(size_t y = 0; y < partial_y; ++y)
-                {
-                    data[y] = *(reinterpret_cast<const ScalarType *>(in.ptr() + y * in_stride) + x);
-                }
-
-                std::memcpy(out.ptr() + x * 4 * sizeof(ScalarType), data, 4 * sizeof(ScalarType));
-            }
-        }
-    },
-    in, out);
-}
-
-void NEGEMMInterleave4x4Kernel::run(const Window &window, const ThreadInfo &info)
-{
-    ARM_COMPUTE_UNUSED(info);
-    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
-    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
-    ARM_COMPUTE_ERROR_ON(_func == nullptr);
-    /*
-    *  This kernel puts the values in a 4x4 block of Matrix A on the same row (Interleaved values)
-    *         |a00 a01 a02 a03|
-    *         |a10 a11 a12 a13|
-    *         |a20 a21 a22 a23| = | a00 a10 a20 a30 || a01 a11 a21 a31 || a02 a12 a22 a32 || a03 a13 a23 a33 |
-    *         |a30 a31 a32 a33|
-    *
-    *         After this operation, the output matrix will have the following shape: [ height * 4, ceil(width / 4.0f) ]
-    */
-    (this->*_func)(_input, _output, window);
-}
diff --git a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h b/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h
deleted file mode 100644
index e592d5e..0000000
--- a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_NEGEMMINTERLEAVE4x4KERNEL_H
-#define ARM_COMPUTE_NEGEMMINTERLEAVE4x4KERNEL_H
-
-#include "src/core/NEON/INESimpleKernel.h"
-
-namespace arm_compute
-{
-class ITensor;
-
-/** Kernel to interleave the elements of a matrix
- *
- * This function puts the values in a 4x4 block of Matrix A on the same row (Interleaved values)
- *
- * @f[
- * \left( \begin{array}{cccc}
- * a00 & a01 & a02 & a03 \\
- * a10 & a11 & a12 & a13 \\
- * a20 & a21 & a22 & a23 \\
- * a30 & a31 & a32 & a33 \\
- * \end{array} \right)
- * \rightarrow
- * \left( \begin{array}{ccccccccccccccccc}
- * a00 & a10 & a20 & a30 & a01 & a11 & a21 & a31 & a02 & a12 & a22 & a32 & a03 & a13 & a23 & a33 \\
- * \end{array} \right)
- * @f]
- *
- * After this operation, the output matrix will have the following shape: [ height * 4, ceil(width / 4.0f) ]
- */
-class NEGEMMInterleave4x4Kernel : public INESimpleKernel
-{
-public:
-    const char *name() const override
-    {
-        return "NEGEMMInterleave4x4Kernel";
-    }
-    /** Constructor */
-    NEGEMMInterleave4x4Kernel();
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    NEGEMMInterleave4x4Kernel(const NEGEMMInterleave4x4Kernel &) = delete;
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    NEGEMMInterleave4x4Kernel &operator=(const NEGEMMInterleave4x4Kernel &) = delete;
-    /** Allow instances of this class to be moved */
-    NEGEMMInterleave4x4Kernel(NEGEMMInterleave4x4Kernel &&) = default;
-    /** Allow instances of this class to be moved */
-    NEGEMMInterleave4x4Kernel &operator=(NEGEMMInterleave4x4Kernel &&) = default;
-    /** Default destructor */
-    ~NEGEMMInterleave4x4Kernel() = default;
-    /** Initialise the kernel's input and output.
-     *
-     * @param[in]  input  Input tensor. Data types supported: All
-     * @param[out] output Output tensor which stores the interleaved matrix. Data type supported: same as @p input.
-     */
-    void configure(const ITensor *input, ITensor *output);
-    /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMInterleave4x4Kernel
-     *
-     * @param[in] input  Input tensor info. Data types supported: All
-     * @param[in] output Output tensor info which stores the interleaved matrix. Data type supported: same as @p input.
-     *
-     * @return a status
-     */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *output);
-
-    // Inherited methods overridden:
-    void run(const Window &window, const ThreadInfo &info) override;
-
-private:
-    /** Template function to run gemm interleave 4x4
-     *
-     * @tparam ScalarType Scalar datatype
-     *
-     * @param[in]  input  Input tensor. Data types supported: uint32_t, uint16_t and uint8_t
-     * @param[out] output Output tensor. Data types supported: uint32_t, uint16_t and uint8_t
-     * @param[in]  window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
-     */
-    template <typename ScalarType>
-    void gemm_interleave4x4(const ITensor *input, ITensor *output, const Window &window);
-
-    /** Common signature for all the specialised gemm interleave 4x4 functions
-     *
-     * @param[in]  input  Input tensor. Data types supported: uint32_t, uint16_t and uint8_t
-     * @param[out] output Output tensor. Data types supported: uint32_t, uint16_t and uint8_t
-     * @param[in]  window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
-     */
-    using GEMMInterleaveFunctionFuncPtr = void (NEGEMMInterleave4x4Kernel::*)(const ITensor *input, ITensor *output, const Window &window);
-
-    GEMMInterleaveFunctionFuncPtr _func;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_NEGEMMINTERLEAVE4x4KERNEL_H*/
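For reference, the transform this kernel implemented (and cpu::kernels::CpuGemmInterleave4x4Kernel now provides) in plain scalar C++. The shape follows the validate() logic above: output [width * 4, ceil(height / 4)], with any partial 4-row block zero-filled. Illustrative, not library code:

```cpp
#include <cstddef>
#include <vector>

// Interleave rows in blocks of four: element (x, y) of the input lands in
// output row y/4, at column x*4 + (y % 4). Row-major storage throughout.
template <typename T>
std::vector<T> interleave_4x4(const std::vector<T> &in, size_t w, size_t h)
{
    const size_t out_h = (h + 3) / 4;       // ceil(height / 4)
    std::vector<T> out(w * 4 * out_h, T{}); // zero-pad the partial block
    for(size_t y = 0; y < h; ++y)
    {
        for(size_t x = 0; x < w; ++x)
        {
            out[(y / 4) * (w * 4) + x * 4 + (y % 4)] = in[y * w + x];
        }
    }
    return out;
}
```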
diff --git a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
index b95bdd4..6bcf59e 100644
--- a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
@@ -686,7 +686,7 @@
     const auto   width_out  = static_cast<int>(out_info.dimension(0));
     const auto   height_out = static_cast<int>(out_info.dimension(1));
     const size_t out_stride = out_info.strides_in_bytes()[1] / out_info.element_size();
+    // The implementation assumes that matrix A and matrix B have been reshaped respectively with CpuGemmInterleave4x4 and CpuGemmTranspose1xW
+    // The implementation assumes that the matrix A and Matrix B have been reshaped respectively with CpuGemmInterleave4x4 and CpuGemmTranspose1xW
     // The reshaping of the matrices helps to have a cache friendly implementation and helps to avoid the data re-arrangements needed for computing 16x4 elements per iteration
     // All the values needed for computing a single 4x4 block will be read from consecutive memory positions
     execute_window_loop(window, [&](const Coordinates & id)
diff --git a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
index acfb79e..b9a1b5e 100644
--- a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
+++ b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
@@ -61,7 +61,7 @@
     ~NEGEMMLowpMatrixMultiplyKernel() = default;
     /** Initialise the kernel's input and output.
      *
-     * The input matrices @p input0 and @p input1 must be the output of the kernels: @ref NEGEMMInterleave4x4Kernel and @ref NEGEMMTranspose1xWKernel. These two
+     * The input matrices @p input0 and @p input1 must be the output of the kernels: @ref cpu::kernels::CpuGemmInterleave4x4Kernel and @ref cpu::kernels::CpuGemmTranspose1xWKernel. These two
      * kernels change the layout of the original matrices to be more cache-friendly.
      *
      * @param[in]  input0 Input tensor containing the interleaved Matrix A. Data type supported: U8/QASYMM8/S8/QASYMM8_SIGNED
diff --git a/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp b/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp
deleted file mode 100644
index 6a2802a..0000000
--- a/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright (c) 2016-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
-
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/core/Validate.h"
-#include "src/core/CPP/Validate.h"
-#include "src/core/NEON/NEFixedPoint.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-
-#include <arm_neon.h>
-
-namespace arm_compute
-{
-namespace
-{
-constexpr unsigned int num_elems_processed_per_iteration = 16;
-
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, float beta)
-{
-    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_UNUSED(beta);
-
-    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
-
-    if(output->total_size() > 0)
-    {
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
-    }
-
-    return Status{};
-}
-
-void matrix_addition_f32(const ITensor *input, ITensor *output, const Window &window, float beta)
-{
-    const float32x4_t beta_f32 = vdupq_n_f32(beta);
-
-    Iterator in(input, window);
-    Iterator out(output, window);
-
-    execute_window_loop(window, [&](const Coordinates &)
-    {
-        const auto in_ptr  = reinterpret_cast<const float *>(in.ptr());
-        const auto out_ptr = reinterpret_cast<float *>(out.ptr());
-
-        float32x4x4_t       alpha_ab = vld4q_f32(out_ptr);
-        const float32x4x4_t c        = vld4q_f32(in_ptr);
-
-        // Multiply matrix C by its weight and accumulate
-        alpha_ab.val[0] = vmlaq_f32(alpha_ab.val[0], c.val[0], beta_f32);
-        alpha_ab.val[1] = vmlaq_f32(alpha_ab.val[1], c.val[1], beta_f32);
-        alpha_ab.val[2] = vmlaq_f32(alpha_ab.val[2], c.val[2], beta_f32);
-        alpha_ab.val[3] = vmlaq_f32(alpha_ab.val[3], c.val[3], beta_f32);
-
-        vst4q_f32(out_ptr, alpha_ab);
-    },
-    in, out);
-}
-
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-void matrix_addition_f16(const ITensor *input, ITensor *output, const Window &window, float beta)
-{
-    const float16x8_t beta_f16 = vdupq_n_f16(beta);
-
-    Iterator in(input, window);
-    Iterator out(output, window);
-
-    execute_window_loop(window, [&](const Coordinates &)
-    {
-        const auto in_ptr  = reinterpret_cast<const float16_t *>(in.ptr());
-        const auto out_ptr = reinterpret_cast<float16_t *>(out.ptr());
-
-        float16x8x2_t       alpha_ab = vld2q_f16(out_ptr);
-        const float16x8x2_t c        = vld2q_f16(in_ptr);
-        // Multiply matrix C by its weight and accumulate
-        alpha_ab.val[0] = vaddq_f16(alpha_ab.val[0], vmulq_f16(c.val[0], beta_f16));
-        alpha_ab.val[1] = vaddq_f16(alpha_ab.val[1], vmulq_f16(c.val[1], beta_f16));
-
-        vst2q_f16(out_ptr + 0, alpha_ab);
-    },
-    in, out);
-}
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-
-} // namespace
-
-NEGEMMMatrixAdditionKernel::NEGEMMMatrixAdditionKernel()
-    : INESimpleKernel(), _func(nullptr), _beta(0.0f)
-{
-}
-
-void NEGEMMMatrixAdditionKernel::configure(const ITensor *input, ITensor *output, float beta)
-{
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
-    // Perform validation step
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), beta));
-
-    switch(input->info()->data_type())
-    {
-        case DataType::F32:
-            _func = &matrix_addition_f32;
-            break;
-        case DataType::F16:
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-            _func = &matrix_addition_f16;
-            break;
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-        default:
-            ARM_COMPUTE_ERROR("Data type not supported");
-            break;
-    }
-
-    // Configure kernel window
-    INESimpleKernel::configure(input, output, num_elems_processed_per_iteration);
-
-    _beta = beta;
-}
-
-Status NEGEMMMatrixAdditionKernel::validate(const ITensorInfo *input, const ITensorInfo *output, float beta)
-{
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, beta));
-    ARM_COMPUTE_RETURN_ON_ERROR(INESimpleKernel::validate(input->clone().get(), output->clone().get(), num_elems_processed_per_iteration));
-    return Status{};
-}
-
-void NEGEMMMatrixAdditionKernel::run(const Window &window, const ThreadInfo &info)
-{
-    ARM_COMPUTE_UNUSED(info);
-    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
-    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INESimpleKernel::window(), window);
-
-    if(_beta != 0.0f)
-    {
-        (*_func)(_input, _output, window, _beta);
-    }
-}
-} // namespace arm_compute
diff --git a/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h b/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h
deleted file mode 100644
index c896cab..0000000
--- a/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_NEGEMMMATRIXADDITIONKERNEL_H
-#define ARM_COMPUTE_NEGEMMMATRIXADDITIONKERNEL_H
-
-#include "src/core/NEON/INESimpleKernel.h"
-
-namespace arm_compute
-{
-class ITensor;
-
-/** Kernel to perform the in-place matrix addition between 2 matrices taking into account that the second matrix might be weighted by a scalar value beta:
- *
- * @note [ MTX_OUT = MTX_0 + beta * MTX_1 ] with MTX_0 and MTX_1 of the same size
- *
- * @note This stage is used to finalize the GEMM result and it is computed if and only if beta != 0.0. In case this kernel is used for finalizing GEMM result, we have:
- *        - MTX_0 = A * B * alpha, where MTX_0 is the output of @ref NEGEMMMatrixMultiplyKernel
- *        - MTX_1 = C
- */
-class NEGEMMMatrixAdditionKernel : public INESimpleKernel
-{
-public:
-    const char *name() const override
-    {
-        return "NEGEMMMatrixAdditionKernel";
-    }
-    /** Constructor */
-    NEGEMMMatrixAdditionKernel();
-    /** Prevent instances of this class from being copied */
-    NEGEMMMatrixAdditionKernel(const NEGEMMMatrixAdditionKernel &) = delete;
-    /** Prevent instances of this class from being copied */
-    NEGEMMMatrixAdditionKernel &operator=(const NEGEMMMatrixAdditionKernel &) = delete;
-    /** Allow instances of this class to be moved */
-    NEGEMMMatrixAdditionKernel(NEGEMMMatrixAdditionKernel &&) = default;
-    /** Allow instances of this class to be moved */
-    NEGEMMMatrixAdditionKernel &operator=(NEGEMMMatrixAdditionKernel &&) = default;
-    /** Default destructor */
-    ~NEGEMMMatrixAdditionKernel() = default;
-    /** Initialise the kernel's input and output.
-     *
-     * @note The input and output tensor must have the same dimensions
-     *
-     * @param[in]      input  Input tensor (Matrix C). Data types supported: F16/F32
-     * @param[in, out] output Output tensor. If this kernel is used to finalize the GEMM result, output contains the result obtained by the kernel @ref NEGEMMMatrixMultiplyKernel. Data type supported: the same as @p input.
-     * @param[in]      beta   Weight of matrix C
-     */
-    void configure(const ITensor *input, ITensor *output, float beta);
-    /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMMatrixAdditionKernel.
-     *
-     * @note The input and output tensor must have the same dimensions
-     *
-     * @param[in] input  Input tensor info (Matrix C). Data types supported: F16/F32
-     * @param[in] output Output tensor info. If this kernel is used to finalize the GEMM result, output contains the result obtained by the kernel @ref NEGEMMMatrixMultiplyKernel. Data type supported: the same as @p input.
-     * @param[in] beta   Weight of matrix C
-     *
-     * @return a status
-     */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *output, float beta);
-
-    // Inherited methods overridden:
-    void run(const Window &window, const ThreadInfo &info) override;
-
-private:
-    /** Common signature for all the matrix addition functions
-     *
-     * @param[in]  input  An input tensor. Data types supported: F16/F32
-     * @param[out] output The output tensor. Data type supported: same as @p input
-     * @param[in]  window Region on which to execute the kernel.
-     * @param[in]  beta   Weight of matrix C
-     */
-    using MatrixAdditionFunction = void(const ITensor *input, ITensor *output, const Window &window, float beta);
-    /** Matrix addition function to use for the particular tensor types passed to configure() */
-    MatrixAdditionFunction *_func;
-    float                   _beta;
-};
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_NEGEMMMATRIXADDITIONKERNEL_H */
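The operation being ported here is simply an in-place dst += beta * src, skipped when beta == 0, as the deleted run() shows. A scalar reference, illustrative only (the ported cpu::kernels::CpuGemmMatrixAdditionKernel keeps the vectorized F32/F16 paths):

```cpp
#include <cstddef>

// MTX_OUT = MTX_0 + beta * MTX_1, over n contiguous elements.
void matrix_addition_ref(const float *src, float *dst, size_t n, float beta)
{
    if(beta == 0.0f)
    {
        return; // no-op for beta == 0, matching the kernel's run() guard
    }
    for(size_t i = 0; i < n; ++i)
    {
        dst[i] += beta * src[i];
    }
}
```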
diff --git a/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h b/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h
index 3bc162a..4341ff0 100644
--- a/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h
+++ b/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h
@@ -32,7 +32,7 @@
 
 /** Kernel to multiply two input matrices "A" and "B". All elements of the output matrix/vector will be multiplied by alpha after the matrix multiplication
  *
- * @note If the output tensor is a matrix, the implementation assumes that the input tensors @p input0 and @p input1 are both matrices and reshaped respectively with @ref NEGEMMInterleave4x4Kernel" and @ref NEGEMMTranspose1xWKernel
+ * @note If the output tensor is a matrix, the implementation assumes that the input tensors @p input0 and @p input1 are both matrices and reshaped respectively with @ref cpu::kernels::CpuGemmInterleave4x4Kernel and @ref cpu::kernels::CpuGemmTranspose1xWKernel
  * @note If the output tensor is a vector and the data type is F32, the implementation assumes that the first input tensor @p input0 is a vector and the second input tensor @p input1 a matrix. The implementation also assumes that both tensors have not been reshaped
  *
  */
@@ -55,7 +55,7 @@
     NEGEMMMatrixMultiplyKernel &operator=(NEGEMMMatrixMultiplyKernel &&) = default;
     /** Initialise the kernel's input and output.
      *
-     * @note If the output tensor is a matrix, the input matrices @p input0 and @p input1 should be the output of the kernels: @ref NEGEMMInterleave4x4Kernel and @ref NEGEMMTranspose1xWKernel
+     * @note If the output tensor is a matrix, the input matrices @p input0 and @p input1 should be the output of the kernels: @ref cpu::kernels::CpuGemmInterleave4x4Kernel and @ref cpu::kernels::CpuGemmTranspose1xWKernel
      *       These two kernels change the layout of the original matrices to be more cache-friendly.
      *
      * @param[in]  input0         Input tensor containing the interleaved Matrix A or the vector A. Data types supported: F16/F32
@@ -63,7 +63,7 @@
      *                            If the output tensor is a vector, input1 must contain the matrix B not reshaped. Data type supported: same as @p input0
      * @param[out] output         Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0.
      * @param[in]  alpha          Weight of the matrix product
-     * @param[in]  is_interleaved (Optional) True if input0 and input1 have been reshaped respectively using @ref NEGEMMInterleave4x4Kernel and @ref NEGEMMTranspose1xWKernel
+     * @param[in]  is_interleaved (Optional) True if input0 and input1 have been reshaped respectively using @ref cpu::kernels::CpuGemmInterleave4x4Kernel and @ref cpu::kernels::CpuGemmTranspose1xWKernel
      * @param[in]  reshape_info   (Optional) GEMM reshape info. If is_interleaved_transposed = true, this object must contain the information to understand how the matrix A and matrix B have been reshaped
      */
     void configure(const ITensor *input0, const ITensor *input1, ITensor *output, float alpha, bool is_interleaved, const GEMMReshapeInfo &reshape_info = GEMMReshapeInfo());
@@ -74,7 +74,7 @@
      *                           If the output tensor is a vector, input1 must contain the matrix B not reshaped. Data type supported: same as @p input0
      * @param[in] output         Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0.
      * @param[in] alpha          Weight of the matrix product
-     * @param[in] is_interleaved (Optional) True if input0 and input1 have been reshaped respectively using @ref NEGEMMInterleave4x4Kernel and @ref NEGEMMTranspose1xWKernel
+     * @param[in] is_interleaved (Optional) True if input0 and input1 have been reshaped respectively using @ref cpu::kernels::CpuGemmInterleave4x4Kernel and @ref cpu::kernels::CpuGemmTranspose1xWKernel
      * @param[in] reshape_info   (Optional) GEMM reshape info. If is_interleaved_transposed = true, this object must contain the information to understand how the matrix A and matrix B have been reshaped
      *
      * @return a status
diff --git a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp b/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp
deleted file mode 100644
index 20b0cab..0000000
--- a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
-
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/Window.h"
-#include "src/core/NEON/INEKernel.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-
-#include <arm_neon.h>
-
-namespace arm_compute
-{
-namespace
-{
-TensorShape get_output_shape(const ITensorInfo *input)
-{
-    TensorShape  output_shape{ input->tensor_shape() };
-    const size_t transpose_w = 16 / input->element_size();
-    output_shape.set(0, input->dimension(1) * transpose_w);
-    output_shape.set(1, static_cast<size_t>(std::ceil((input->dimension(0) / static_cast<float>(transpose_w)))));
-    return output_shape;
-}
-
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
-{
-    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
-    ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
-    //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use CPU FP16 instructions.
-
-    if(output->total_size() != 0)
-    {
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), get_output_shape(input));
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
-    }
-
-    return Status{};
-}
-} // namespace
-
-void NEGEMMTranspose1xWKernel::configure(const ITensor *input, ITensor *output)
-{
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
-    // Output tensor auto inizialitation if not yet initialized
-    auto_init_if_empty(*output->info(), get_output_shape(input->info()), 1, input->info()->data_type());
-
-    // Perform validate step
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
-
-    _input  = input;
-    _output = output;
-
-    const size_t vector_size = 16 / input->info()->element_size();
-
-    // Configure kernel window
-    Window win = calculate_max_window(*input->info(), Steps(vector_size));
-
-    INEKernel::configure(win);
-}
-
-Status NEGEMMTranspose1xWKernel::validate(const ITensorInfo *input, const ITensorInfo *output)
-{
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output));
-
-    return Status{};
-}
-
-void NEGEMMTranspose1xWKernel::run(const Window &window, const ThreadInfo &info)
-{
-    ARM_COMPUTE_UNUSED(info);
-    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
-    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INESimpleKernel::window(), window);
-
-    /*
-     * Following an example of how the transposition1xW works when the input data type is F32
-     *
-     *         |a00 a01 a02 a03|
-     *         |a10 a11 a12 a13|
-     *         |a20 a21 a22 a23| = | a00 a01 a02 a03 || a10 a11 a12 a13 || a20 a21 a22 a23 || a30 a31 a32 a33 |
-     *         |a30 a31 a32 a33|
-     *
-     * The output matrix will have the following shape: [ height * W, ceil(width / W) ], where W = (16 / element size of the tensor)
-     */
-
-    // Set window for output tensor. Set to 0 the X and Y dimensions in order to allow multi-threading implementation and future batched matrix multiplications
-    Window win_out(window);
-    win_out.set(Window::DimX, Window::Dimension(0, 0, 0));
-    win_out.set(Window::DimY, Window::Dimension(0, 0, 0));
-
-    Iterator in(_input, window);
-    Iterator out(_output, win_out);
-
-    const size_t in_width     = _input->info()->dimension(0);
-    const size_t element_size = _input->info()->element_size();
-    const size_t out_stride   = _output->info()->strides_in_bytes()[1];
-    const size_t vector_size  = 16 / element_size;
-
-    execute_window_loop(window, [&](const Coordinates & id)
-    {
-        const uint8_t *in_ptr  = in.ptr();
-        uint8_t *const out_ptr = out.ptr() + (id.y() * vector_size) * element_size + (id.x() / vector_size) * out_stride;
-
-        for(size_t k = 0; k < vector_size; ++k)
-        {
-            // If the input width is not multiple of W, we fill the reference with 0s
-            if((id.x() + k) >= in_width)
-            {
-                std::memset(out_ptr + k * element_size, 0, element_size);
-            }
-            else
-            {
-                std::memcpy(out_ptr + k * element_size, in_ptr + k * element_size, element_size);
-            }
-        }
-    },
-    in, out);
-}
-} // namespace arm_compute
diff --git a/src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.cpp b/src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.cpp
new file mode 100644
index 0000000..67f2a49
--- /dev/null
+++ b/src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.cpp
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2016-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h"
+
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/WindowHelpers.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+using namespace arm_compute::misc::shape_calculator;
+
+namespace
+{
+template <typename ScalarType>
+void gemm_interleave4x4(const ITensor *src, ITensor *dst, const Window &window)
+{
+    const size_t window_start_x = window.x().start();
+    const size_t window_end_x   = window.x().end();
+
+    const size_t in_height = src->info()->dimension(1);
+    const size_t in_stride = src->info()->strides_in_bytes()[1];
+
+    const size_t partial_y = in_height % 4;
+
+    // Set window for the src tensor
+    Window win = window;
+    win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+    // Set window for the dst tensor
+    Window win_out(window);
+    win_out.set(Window::DimX, Window::Dimension(0, 1, 1));
+    win_out.scale(Window::DimY, 0.25f);
+
+    Iterator in(src, win);
+    Iterator out(dst, win_out);
+
+    execute_window_loop(win, [&](const Coordinates & id)
+    {
+        if(id.y() + 4 <= static_cast<int>(in_height))
+        {
+            for(size_t x = window_start_x; x < window_end_x; ++x)
+            {
+                const ScalarType data[4] =
+                {
+                    *(reinterpret_cast<const ScalarType *>(in.ptr() + 0 * in_stride) + x),
+                    *(reinterpret_cast<const ScalarType *>(in.ptr() + 1 * in_stride) + x),
+                    *(reinterpret_cast<const ScalarType *>(in.ptr() + 2 * in_stride) + x),
+                    *(reinterpret_cast<const ScalarType *>(in.ptr() + 3 * in_stride) + x),
+                };
+                std::memcpy(out.ptr() + x * 4 * sizeof(ScalarType), data, 4 * sizeof(ScalarType));
+            }
+        }
+        else
+        {
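+            // Fewer than 4 rows remain at the bottom edge: copy what is left and zero-pad the 4x4 block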
+            for(size_t x = window_start_x; x < window_end_x; ++x)
+            {
+                ScalarType data[4] = { 0, 0, 0, 0 };
+
+                for(size_t y = 0; y < partial_y; ++y)
+                {
+                    data[y] = *(reinterpret_cast<const ScalarType *>(in.ptr() + y * in_stride) + x);
+                }
+
+                std::memcpy(out.ptr() + x * 4 * sizeof(ScalarType), data, 4 * sizeof(ScalarType));
+            }
+        }
+    },
+    in, out);
+}
+} // namespace
+
+void CpuGemmInterleave4x4Kernel::configure(const ITensorInfo *src, ITensorInfo *dst)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+
+    // dst auto initialization if not yet initialized
+    auto_init_if_empty(*dst, src->clone()->set_tensor_shape(compute_interleaved_shape(*src)));
+
+    // Perform validate step
+    ARM_COMPUTE_ERROR_THROW_ON(CpuGemmInterleave4x4Kernel::validate(src, dst));
+
+    switch(src->element_size())
+    {
+        case 1:
+            _func = &gemm_interleave4x4<uint8_t>;
+            break;
+        case 2:
+            _func = &gemm_interleave4x4<uint16_t>;
+            break;
+        case 4:
+            _func = &gemm_interleave4x4<uint32_t>;
+            break;
+        default:
+            ARM_COMPUTE_ERROR_ON("Element size not supported");
+            break;
+    }
+
+    Window win = calculate_max_window(*src, Steps(1, 4));
+    ICPPKernel::configure(win);
+}
+
+Status CpuGemmInterleave4x4Kernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+    //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use CPU FP16 instructions.
+    ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
+
+    if(dst->total_size() != 0)
+    {
+        const TensorShape dst_shape = compute_interleaved_shape(*src);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), dst_shape);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst);
+    }
+
+    return Status{};
+}
+
+void CpuGemmInterleave4x4Kernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_UNUSED(info);
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+    ARM_COMPUTE_ERROR_ON(_func == nullptr);
+    ARM_COMPUTE_ERROR_ON(tensors.empty());
+    /*
+     * This kernel puts the values in a 4x4 block of Matrix A on the same row (Interleaved values)
+     *         |a00 a01 a02 a03|
+     *         |a10 a11 a12 a13|
+     *         |a20 a21 a22 a23| = | a00 a10 a20 a30 || a01 a11 a21 a31 || a02 a12 a22 a32 || a03 a13 a23 a33 |
+     *         |a30 a31 a32 a33|
+     *
+     *         After this operation, the dst matrix will have the following shape: [ height * 4, ceil(width / 4.0f) ]
+     */
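+    // src and dst are not stored by the kernel; they are injected for each run through the ITensorPack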
+    const ITensor *src = tensors.get_const_tensor(TensorType::ACL_SRC);
+    ITensor       *dst = tensors.get_tensor(TensorType::ACL_DST);
+
+    (*_func)(src, dst, window);
+}
+
+const char *CpuGemmInterleave4x4Kernel::name() const
+{
+    return "CpuGemmInterleave4x4Kernel";
+}
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h b/src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h
new file mode 100644
index 0000000..94d88c7
--- /dev/null
+++ b/src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2016-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_GEMM_INTERLEAVE4x4_KERNEL_H
+#define ARM_COMPUTE_CPU_GEMM_INTERLEAVE4x4_KERNEL_H
+
+#include "src/core/common/Macros.h"
+#include "src/core/cpu/ICpuKernel.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+/** Kernel to interleave the elements of a matrix
+ *
+ * This function puts the values in a 4x4 block of Matrix A on the same row (Interleaved values)
+ *
+ * @f[
+ * \left( \begin{array}{cccc}
+ * a00 & a01 & a02 & a03 \\
+ * a10 & a11 & a12 & a13 \\
+ * a20 & a21 & a22 & a23 \\
+ * a30 & a31 & a32 & a33 \\
+ * \end{array} \right)
+ * \rightarrow
+ * \left( \begin{array}{ccccccccccccccccc}
+ * a00 & a10 & a20 & a30 & a01 & a11 & a21 & a31 & a02 & a12 & a22 & a32 & a03 & a13 & a23 & a33 \\
+ * \end{array} \right)
+ * @f]
+ *
+ * After this operation, the dst matrix will have the following shape: [ height * 4, ceil(width / 4.0f) ]
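+ *
+ * A minimal usage sketch (@p src / @p dst are hypothetical, already-allocated tensors, shown for illustration only):
+ * @code
+ * CpuGemmInterleave4x4Kernel interleave;
+ * interleave.configure(src->info(), dst->info()); // dst info is auto-initialized if empty
+ * ITensorPack pack{ { TensorType::ACL_SRC, src }, { TensorType::ACL_DST, dst } };
+ * NEScheduler::get().schedule_op(&interleave, Window::DimY, interleave.window(), pack);
+ * @endcode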
+ */
+class CpuGemmInterleave4x4Kernel : public ICpuKernel
+{
+public:
+    /** Default Constructor */
+    CpuGemmInterleave4x4Kernel() = default;
+    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmInterleave4x4Kernel);
+    /** Initialise the kernel's src and dst.
+     *
+     * @param[in]  src Input tensor info. Data types supported: All
+     * @param[out] dst Output tensor info which stores the interleaved matrix. Data type supported: same as @p src.
+     */
+    void configure(const ITensorInfo *src, ITensorInfo *dst);
+    /** Static function to check if given info will lead to a valid configuration of @ref CpuGemmInterleave4x4Kernel
+     *
+     * Similar to @ref CpuGemmInterleave4x4Kernel::configure()
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *src, const ITensorInfo *dst);
+
+    // Inherited methods overridden:
+    void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
+    const char *name() const override;
+
+private:
+    /** Common signature for all the specialised gemm interleave 4x4 functions
+     *
+     * @param[in]  src    Input tensor. Data types supported: uint32_t, uint16_t and uint8_t
+     * @param[out] dst    Output tensor. Data types supported: uint32_t, uint16_t and uint8_t
+     * @param[in]  window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+     */
+    using GEMMInterleaveFunctionPtr = void (*)(const ITensor *src, ITensor *dst, const Window &window);
+
+    GEMMInterleaveFunctionPtr _func{ nullptr };
+};
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
+#endif /*ARM_COMPUTE_CPU_GEMM_INTERLEAVE4x4_KERNEL_H*/
diff --git a/src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.cpp b/src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.cpp
new file mode 100644
index 0000000..cc39cdf
--- /dev/null
+++ b/src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.cpp
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2016-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.h"
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "src/core/CPP/Validate.h"
+#include "src/core/NEON/NEFixedPoint.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/WindowHelpers.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+namespace
+{
+void matrix_addition_f32(const ITensor *src, ITensor *dst, const Window &window, float beta)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+    const float32x4_t beta_f32 = vdupq_n_f32(beta);
+
+    constexpr int window_step_x  = 16;
+    const auto    window_start_x = static_cast<int>(window.x().start());
+    const auto    window_end_x   = static_cast<int>(window.x().end());
+
+    Window win = window.collapse_if_possible(window, Window::DimZ);
+    win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+    Iterator in(src, win);
+    Iterator out(dst, win);
+
+    execute_window_loop(win, [&](const Coordinates &)
+    {
+        const auto in_ptr  = reinterpret_cast<const float *>(in.ptr());
+        const auto out_ptr = reinterpret_cast<float *>(out.ptr());
+
+        int x = window_start_x;
+        for(; x < (window_end_x - window_step_x); x += window_step_x)
+        {
+            float32x4x4_t       alpha_ab = vld4q_f32(out_ptr + x);
+            const float32x4x4_t c        = vld4q_f32(in_ptr + x);
+
+            // Multiply matrix C by its weight and accumulate
+            alpha_ab.val[0] = vmlaq_f32(alpha_ab.val[0], c.val[0], beta_f32);
+            alpha_ab.val[1] = vmlaq_f32(alpha_ab.val[1], c.val[1], beta_f32);
+            alpha_ab.val[2] = vmlaq_f32(alpha_ab.val[2], c.val[2], beta_f32);
+            alpha_ab.val[3] = vmlaq_f32(alpha_ab.val[3], c.val[3], beta_f32);
+
+            vst4q_f32(out_ptr + x, alpha_ab);
+        }
+
+        // Left-over loop
+        for(; x < window_end_x; ++x)
+        {
+            *(out_ptr + x) += *(in_ptr + x) * beta;
+        }
+    },
+    in, out);
+}
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+void matrix_addition_f16(const ITensor *src, ITensor *dst, const Window &window, float beta)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+    const float16x8_t beta_f16 = vdupq_n_f16(beta);
+
+    constexpr int window_step_x  = 16;
+    const auto    window_start_x = static_cast<int>(window.x().start());
+    const auto    window_end_x   = static_cast<int>(window.x().end());
+
+    Window win = window.collapse_if_possible(window, Window::DimZ);
+    win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+    Iterator in(src, win);
+    Iterator out(dst, win);
+
+    execute_window_loop(win, [&](const Coordinates &)
+    {
+        const auto in_ptr  = reinterpret_cast<const float16_t *>(in.ptr());
+        const auto out_ptr = reinterpret_cast<float16_t *>(out.ptr());
+
+        int x = window_start_x;
+        for(; x < (window_end_x - window_step_x); x += window_step_x)
+        {
+            float16x8x2_t       alpha_ab = vld2q_f16(out_ptr + x);
+            const float16x8x2_t c        = vld2q_f16(in_ptr + x);
+            // Multiply matrix C by its weight and accumulate
+            alpha_ab.val[0] = vaddq_f16(alpha_ab.val[0], vmulq_f16(c.val[0], beta_f16));
+            alpha_ab.val[1] = vaddq_f16(alpha_ab.val[1], vmulq_f16(c.val[1], beta_f16));
+
+            vst2q_f16(out_ptr + x, alpha_ab);
+        }
+
+        // Left-over loop
+        for(; x < window_end_x; ++x)
+        {
+            *(out_ptr + x) += *(in_ptr + x) * static_cast<float16_t>(beta);
+        }
+    },
+    in, out);
+}
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+
+} // namespace
+
+void CpuGemmMatrixAdditionKernel::configure(const ITensorInfo *src, ITensorInfo *dst, float beta)
+{
+    ARM_COMPUTE_UNUSED(dst);
+    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+
+    // Perform validation step
+    ARM_COMPUTE_ERROR_THROW_ON(CpuGemmMatrixAdditionKernel::validate(src, dst, beta));
+
+    _beta = beta;
+    switch(src->data_type())
+    {
+        case DataType::F32:
+            _func = &matrix_addition_f32;
+            break;
+        case DataType::F16:
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+            _func = &matrix_addition_f16;
+            break;
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
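+        // Without FP16 vector arithmetic support, the F16 case deliberately falls through to the error below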
+        default:
+            ARM_COMPUTE_ERROR("Data type not supported");
+            break;
+    }
+
+    // Configure kernel window
+    Window win = calculate_max_window(*src, Steps());
+    ICPPKernel::configure(win);
+}
+
+Status CpuGemmMatrixAdditionKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, float beta)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+    ARM_COMPUTE_UNUSED(beta);
+
+    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32);
+
+    if(dst->total_size() > 0)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst);
+    }
+    return Status{};
+}
+
+void CpuGemmMatrixAdditionKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_UNUSED(info);
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+    ARM_COMPUTE_ERROR_ON(tensors.empty());
+
+    const ITensor *src = tensors.get_const_tensor(TensorType::ACL_SRC);
+    ITensor       *dst = tensors.get_tensor(TensorType::ACL_DST);
+
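+    // When beta == 0 the C contribution vanishes, so the whole pass is skipped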
+    if(_beta != 0.0f)
+    {
+        (*_func)(src, dst, window, _beta);
+    }
+}
+
+const char *CpuGemmMatrixAdditionKernel::name() const
+{
+    return "CpuGemmMatrixAdditionKernel";
+}
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.h b/src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.h
new file mode 100644
index 0000000..216e61b
--- /dev/null
+++ b/src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2016-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_GEMM_MATRIX_ADDITION_KERNEL_H
+#define ARM_COMPUTE_CPU_GEMM_MATRIX_ADDITION_KERNEL_H
+
+#include "src/core/common/Macros.h"
+#include "src/core/cpu/ICpuKernel.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+/** Kernel to perform the in-place matrix addition between 2 matrices taking into account that the second matrix might be weighted by a scalar value beta:
+ *
+ * @note [ MTX_OUT = MTX_0 + beta * MTX_1 ] with MTX_0 and MTX_1 of the same size
+ *
+ * @note This stage is used to finalize the GEMM result and is executed if and only if beta != 0.0. When this kernel is used to finalize the GEMM result, we have:
+ *        - MTX_0 = A * B * alpha, where MTX_0 is the output of @ref NEGEMMMatrixMultiplyKernel
+ *        - MTX_1 = C
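+ *
+ * For example, with beta = 0.5f the kernel updates each element in place as
+ *        dst[i] = dst[i] + 0.5f * src[i]
+ * where dst initially holds alpha * (A * B) and src holds the matrix C.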
+ */
+class CpuGemmMatrixAdditionKernel : public ICpuKernel
+{
+public:
+    /** Constructor */
+    CpuGemmMatrixAdditionKernel() = default;
+    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmMatrixAdditionKernel);
+    /** Initialise the kernel's input and output.
+     *
+     * @note The input and output tensor must have the same dimensions
+     *
+     * @param[in]      src  Input tensor info (Matrix C). Data types supported: F16/F32
+     * @param[in, out] dst  Output tensor info. If this kernel is used to finalize the GEMM result, @p dst contains the result obtained by @ref NEGEMMMatrixMultiplyKernel. Data type supported: the same as @p src.
+     * @param[in]      beta Weight of matrix C
+     */
+    void configure(const ITensorInfo *src, ITensorInfo *dst, float beta);
+    /** Static function to check if given info will lead to a valid configuration of @ref CpuGemmMatrixAdditionKernel.
+     *
+     * @note The input and output tensor must have the same dimensions
+     *
+     * Similar to @ref CpuGemmMatrixAdditionKernel::configure()
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *src, const ITensorInfo *dst, float beta);
+
+    // Inherited methods overridden:
+    void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
+    const char *name() const override;
+
+private:
+    /** Common signature for all the matrix addition functions
+     *
+     * @param[in]  src    An input tensor. Data types supported: F16/F32
+     * @param[out] dst    The output tensor. Data type supported: same as @p src
+     * @param[in]  window Region on which to execute the kernel.
+     * @param[in]  beta   Weight of matrix C
+     */
+    using MatrixAdditionFunctionPtr = void (*)(const ITensor *src, ITensor *dst, const Window &window, float beta);
+    /** Matrix addition function to use for the particular tensor types passed to configure() */
+    MatrixAdditionFunctionPtr _func{ nullptr };
+    float                     _beta{ 0.f };
+};
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_GEMM_MATRIX_ADDITION_KERNEL_H */
diff --git a/src/core/cpu/kernels/CpuGemmTranspose1xWKernel.cpp b/src/core/cpu/kernels/CpuGemmTranspose1xWKernel.cpp
new file mode 100644
index 0000000..4b059f5
--- /dev/null
+++ b/src/core/cpu/kernels/CpuGemmTranspose1xWKernel.cpp
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2016-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/core/cpu/kernels/CpuGemmTranspose1xWKernel.h"
+
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/WindowHelpers.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+using namespace arm_compute::misc::shape_calculator;
+
+void CpuGemmTranspose1xWKernel::configure(const ITensorInfo *src, ITensorInfo *dst)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+
+    // Output tensor auto initialization if not yet initialized
+    auto_init_if_empty(*dst, src->clone()->set_tensor_shape(compute_transpose1xW_with_element_size_shape(*src)));
+
+    // Perform validate step
+    ARM_COMPUTE_ERROR_THROW_ON(CpuGemmTranspose1xWKernel::validate(src, dst));
+
+    const size_t vector_size = 16 / src->element_size();
+
+    // Configure kernel window
+    Window win = calculate_max_window(*src, Steps(vector_size));
+    ICPPKernel::configure(win);
+}
+
+Status CpuGemmTranspose1xWKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+    ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
+    //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use CPU FP16 instructions.
+
+    if(dst->total_size() != 0)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), compute_transpose1xW_with_element_size_shape(*src));
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst);
+    }
+
+    return Status{};
+}
+
+void CpuGemmTranspose1xWKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_UNUSED(info);
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+    ARM_COMPUTE_ERROR_ON(tensors.empty());
+
+    /*
+     * The following is an example of how the transposition1xW works when the src data type is F32
+     *
+     *         |a00 a01 a02 a03|
+     *         |a10 a11 a12 a13|
+     *         |a20 a21 a22 a23| = | a00 a01 a02 a03 || a10 a11 a12 a13 || a20 a21 a22 a23 || a30 a31 a32 a33 |
+     *         |a30 a31 a32 a33|
+     *
+     * The dst matrix will have the following shape: [ height * W, ceil(width / W) ], where W = (16 / element size of the tensor)
+     */
+
+    // Set window for dst tensor. Set to 0 the X and Y dimensions in order to allow multi-threading implementation and future batched matrix multiplications
+    Window win_out(window);
+    win_out.set(Window::DimX, Window::Dimension(0, 0, 0));
+    win_out.set(Window::DimY, Window::Dimension(0, 0, 0));
+
+    const ITensor *src = tensors.get_const_tensor(TensorType::ACL_SRC);
+    ITensor       *dst = tensors.get_tensor(TensorType::ACL_DST);
+
+    Iterator in(src, window);
+    Iterator out(dst, win_out);
+
+    const size_t in_width     = src->info()->dimension(0);
+    const size_t element_size = src->info()->element_size();
+    const size_t out_stride   = dst->info()->strides_in_bytes()[1];
+    const size_t vector_size  = 16 / element_size;
+
+    execute_window_loop(window, [&](const Coordinates & id)
+    {
+        const uint8_t *in_ptr  = in.ptr();
+        uint8_t *const out_ptr = out.ptr() + (id.y() * vector_size) * element_size + (id.x() / vector_size) * out_stride;
+
+        for(size_t k = 0; k < vector_size; ++k)
+        {
+            // If the src width is not multiple of W, we fill the reference with 0s
+            if((id.x() + k) >= in_width)
+            {
+                std::memset(out_ptr + k * element_size, 0, element_size);
+            }
+            else
+            {
+                std::memcpy(out_ptr + k * element_size, in_ptr + k * element_size, element_size);
+            }
+        }
+    },
+    in, out);
+}
+
+const char *CpuGemmTranspose1xWKernel::name() const
+{
+    return "CpuGemmTranspose1xWKernel";
+}
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h b/src/core/cpu/kernels/CpuGemmTranspose1xWKernel.h
similarity index 62%
rename from src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h
rename to src/core/cpu/kernels/CpuGemmTranspose1xWKernel.h
index 7ca71cf..c9c22bd 100644
--- a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h
+++ b/src/core/cpu/kernels/CpuGemmTranspose1xWKernel.h
@@ -21,16 +21,18 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_NEGEMMTRANSPOSE1xWKERNEL_H
-#define ARM_COMPUTE_NEGEMMTRANSPOSE1xWKERNEL_H
+#ifndef ARM_COMPUTE_CPU_GEMM_TRANSPOSE1xW_KERNEL_H
+#define ARM_COMPUTE_CPU_GEMM_TRANSPOSE1xW_KERNEL_H
 
-#include "src/core/NEON/INESimpleKernel.h"
+#include "src/core/common/Macros.h"
+#include "src/core/cpu/ICpuKernel.h"
 
 namespace arm_compute
 {
-// Forward declarations
-class ITensor;
-
+namespace cpu
+{
+namespace kernels
+{
 /** Kernel which transposes the elements of a matrix in chunks of 1xW, where W is equal to (16 / element size of the tensor)
  *
  * Following an example of how the transposition1xW works when the input data is F32
@@ -66,42 +68,31 @@
  * @note The output matrix will have the following shape: [ height * W, ceil(width / W) ], where W = (16 / element size of the tensor)
  *
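+ * A minimal validation sketch (hypothetical tensor infos, for illustration only):
+ * @code
+ * TensorInfo dst_info{}; // left empty so validate() skips the shape checks
+ * Status s = CpuGemmTranspose1xWKernel::validate(&src_info, &dst_info);
+ * @endcode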
  */
-class NEGEMMTranspose1xWKernel : public INESimpleKernel
+class CpuGemmTranspose1xWKernel : public ICpuKernel
 {
 public:
-    const char *name() const override
-    {
-        return "NEGEMMTranspose1xWKernel";
-    }
     /** Constructor */
-    NEGEMMTranspose1xWKernel() = default;
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    NEGEMMTranspose1xWKernel(const NEGEMMTranspose1xWKernel &) = delete;
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    NEGEMMTranspose1xWKernel &operator=(const NEGEMMTranspose1xWKernel &) = delete;
-    /** Allow instances of this class to be moved */
-    NEGEMMTranspose1xWKernel(NEGEMMTranspose1xWKernel &&) = default;
-    /** Allow instances of this class to be moved */
-    NEGEMMTranspose1xWKernel &operator=(NEGEMMTranspose1xWKernel &&) = default;
-    /** Default destructor */
-    ~NEGEMMTranspose1xWKernel() = default;
-    /** Initialise the kernel's input and output.
+    CpuGemmTranspose1xWKernel() = default;
+    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmTranspose1xWKernel);
+    /** Configure kernel for a given list of arguments
      *
-     * @param[in]  input  Input tensor. Data types supported: All
-     * @param[out] output Output tensor. Data type supported: same as @p input.
+     * @param[in]  src Input tensor info. Data types supported: All
+     * @param[out] dst Output tensor info. Data type supported: same as @p src.
      */
-    void configure(const ITensor *input, ITensor *output);
-    /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMTranspose1xWKernel
+    void configure(const ITensorInfo *src, ITensorInfo *dst);
+    /** Static function to check if given info will lead to a valid configuration of @ref CpuGemmTranspose1xWKernel
      *
-     * @param[in] input  Input tensor info. Data types supported: All
-     * @param[in] output Output tensor info. Data type supported: same as @p input.
+     * Similar to @ref CpuGemmTranspose1xWKernel::configure()
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *output);
+    static Status validate(const ITensorInfo *src, const ITensorInfo *dst);
 
     // Inherited methods overridden:
-    void run(const Window &window, const ThreadInfo &info) override;
+    void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
+    const char *name() const override;
 };
+} // namespace kernels
+} // namespace cpu
 } // namespace arm_compute
-#endif /*ARM_COMPUTE_NEGEMMTRANSPOSE1xWKERNEL_H */
+#endif /*ARM_COMPUTE_CPU_GEMM_TRANSPOSE1xW_KERNEL_H */
diff --git a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
index f469a0b..daa14b1 100644
--- a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
+++ b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
@@ -30,15 +30,6 @@
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
 #include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h"
-#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
 #include "src/core/cpu/kernels/CpuTransposeKernel.h"
 
 #include <cmath>
diff --git a/src/runtime/NEON/functions/NEGEMM.cpp b/src/runtime/NEON/functions/NEGEMM.cpp
index 9b14052..99a7db7 100644
--- a/src/runtime/NEON/functions/NEGEMM.cpp
+++ b/src/runtime/NEON/functions/NEGEMM.cpp
@@ -31,18 +31,19 @@
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "arm_compute/runtime/Tensor.h"
 #include "arm_compute/runtime/TensorAllocator.h"
 #include "src/core/CPP/Validate.h"
-#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
-#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
 #include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
+#include "src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h"
+#include "src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.h"
+#include "src/core/cpu/kernels/CpuGemmTranspose1xWKernel.h"
 #include "src/core/helpers/AutoConfiguration.h"
 #include "src/core/helpers/MemoryHelpers.h"
+#include "src/runtime/cpu/operators/CpuActivation.h"
+#include "src/runtime/cpu/operators/CpuAdd.h"
 #include "src/runtime/cpu/operators/internal/CpuGemmAssemblyDispatch.h"
 
-#include <cmath>
-
 using namespace arm_compute::experimental;
 using namespace arm_compute::misc::shape_calculator;
 
@@ -62,96 +63,117 @@
 }
 } // namespace
 
-NEGEMM::NEGEMM(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
-    : _memory_group(memory_manager),
-      _weights_manager(weights_manager),
-      _interleave_kernel(),
-      _transpose_kernel(),
-      _mm_kernel(),
-      _asm_glue(std::make_unique<cpu::CpuGemmAssemblyDispatch>()),
-      _ma_kernel(),
-      _alpha_scale_func(nullptr),
-      _add_bias(),
-      _activation_func(),
-      _tmp_a(),
-      _tmp_b(),
-      _tmp_d(),
-      _original_b(nullptr),
-      _run_vector_matrix_multiplication(false),
-      _run_alpha_scale(false),
-      _run_addition(false),
-      _run_bias_addition(false),
-      _run_activation(false),
-      _reshape_b_only_on_first_run(false),
-      _is_prepared(false),
-      _asm_glue_run_pack(),
-      _asm_glue_prep_pack(),
-      _asm_glue_workspace(),
-      _aux_mem_req()
+struct NEGEMM::Impl
 {
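+    // Function state lives behind this pimpl so that the public NEGEMM header no longer pulls in kernel and operator headers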
+    MemoryGroup      memory_group{};
+    IWeightsManager *weights_manager{ nullptr };
+
+    std::unique_ptr<cpu::kernels::CpuGemmInterleave4x4Kernel>  interleave_kernel{ nullptr };
+    std::unique_ptr<cpu::kernels::CpuGemmTranspose1xWKernel>   transpose_kernel{ nullptr };
+    std::unique_ptr<NEGEMMMatrixMultiplyKernel>                mm_kernel{ nullptr };
+    std::unique_ptr<cpu::CpuGemmAssemblyDispatch>              asm_glue{ nullptr };
+    std::unique_ptr<cpu::kernels::CpuGemmMatrixAdditionKernel> ma_kernel{ nullptr };
+    std::unique_ptr<cpu::CpuActivation>                        alpha_scale_func{ nullptr };
+    std::unique_ptr<cpu::CpuAdd>                               add_bias{ nullptr };
+    std::unique_ptr<cpu::CpuActivation>                        activation_func{ nullptr };
+
+    const ITensor *a{ nullptr };
+    const ITensor *c{ nullptr };
+    ITensor       *d{ nullptr };
+    ITensor       *gemm_output_to_use{ nullptr };
+    Tensor         tmp_a{};
+    Tensor         tmp_b{};
+    Tensor         tmp_d{};
+    const ITensor *original_b{ nullptr };
+    bool           run_vector_matrix_multiplication{ false };
+    bool           run_alpha_scale{ false };
+    bool           run_addition{ false };
+    bool           run_bias_addition{ false };
+    bool           run_activation{ false };
+    bool           reshape_b_only_on_first_run{ false };
+    bool           is_prepared{ false };
+
+    ITensorPack                      asm_glue_run_pack{};
+    ITensorPack                      asm_glue_prep_pack{};
+    WorkspaceData<Tensor>            asm_glue_workspace{};
+    experimental::MemoryRequirements aux_mem_req{};
+};
+
+NEGEMM::NEGEMM(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
+    : _impl(std::make_unique<Impl>())
+{
+    _impl->memory_group    = MemoryGroup(std::move(memory_manager));
+    _impl->weights_manager = weights_manager;
 }
 
 NEGEMM::~NEGEMM() = default;
 
 void NEGEMM::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info)
 {
+    ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, d);
     ARM_COMPUTE_ERROR_THROW_ON(NEGEMM::validate(a->info(), b->info(), (c != nullptr) ? c->info() : nullptr, d->info(), alpha, beta, gemm_info));
 
     const cpu::AsmGemmInfo asm_info      = init_assembly_metadata(gemm_info);
     const bool             is_c_bias     = gemm_info.reshape_b_only_on_first_run();
     bool                   run_optimised = bool(cpu::CpuGemmAssemblyDispatch::validate(a->info(), b->info(), (is_c_bias && c != nullptr) ? c->info() : nullptr, d->info(), asm_info));
 
+    _impl->a                  = a;
+    _impl->c                  = c;
+    _impl->d                  = d;
+    _impl->gemm_output_to_use = d;
     // Check if we need to reshape the matrix B only on the first run
-    _is_prepared                      = false;
-    _reshape_b_only_on_first_run      = gemm_info.reshape_b_only_on_first_run();
-    _run_vector_matrix_multiplication = a->info()->dimension(1) < 2;
-    _original_b                       = b;
-    _run_alpha_scale                  = alpha != 1.f;
-    _run_bias_addition                = c != nullptr && gemm_info.reshape_b_only_on_first_run();
-    _run_addition                     = beta != 0 && c != nullptr && !gemm_info.reshape_b_only_on_first_run();
-    _run_activation                   = gemm_info.activation_info().enabled() && (!run_optimised || (run_optimised && !cpu::CpuGemmAssemblyDispatch::is_activation_supported(gemm_info.activation_info())));
+    _impl->is_prepared                      = false;
+    _impl->reshape_b_only_on_first_run      = gemm_info.reshape_b_only_on_first_run();
+    _impl->run_vector_matrix_multiplication = a->info()->dimension(1) < 2;
+    _impl->original_b                       = b;
+    _impl->run_alpha_scale                  = alpha != 1.f;
+    _impl->run_bias_addition                = c != nullptr && gemm_info.reshape_b_only_on_first_run();
+    _impl->run_addition                     = beta != 0 && c != nullptr && !gemm_info.reshape_b_only_on_first_run();
+    _impl->run_activation                   = gemm_info.activation_info().enabled() && (!run_optimised || (run_optimised
+                                                                                                           && !cpu::CpuGemmAssemblyDispatch::is_activation_supported(gemm_info.activation_info())));
 
     if(run_optimised)
     {
         const ITensor     *c_to_use      = is_c_bias ? c : nullptr;
         const ITensorInfo *c_info_to_use = c_to_use != nullptr ? c_to_use->info() : nullptr;
-        _asm_glue->configure(a->info(), b->info(), c_info_to_use, d->info(), asm_info);
-        ARM_COMPUTE_ERROR_ON(!_asm_glue->is_configured());
+        _impl->asm_glue                  = std::make_unique<cpu::CpuGemmAssemblyDispatch>();
+        _impl->asm_glue->configure(a->info(), b->info(), c_info_to_use, d->info(), asm_info);
+        ARM_COMPUTE_ERROR_ON(!_impl->asm_glue->is_configured());
 
-        _aux_mem_req = _asm_glue->workspace();
-        _asm_glue_run_pack =
+        _impl->aux_mem_req = _impl->asm_glue->workspace();
+        _impl->asm_glue_run_pack =
         {
             { ACL_SRC_0, a },
             { ACL_SRC_1, b },
             { ACL_SRC_2, c_to_use },
             { ACL_DST, d },
         };
-        _asm_glue_prep_pack = { { ACL_SRC_1, b }, { ACL_SRC_2, c_to_use } };
-        _asm_glue_workspace = manage_workspace<Tensor>(_aux_mem_req, _memory_group, _asm_glue_run_pack, _asm_glue_prep_pack);
+        _impl->asm_glue_prep_pack = { { ACL_SRC_1, b }, { ACL_SRC_2, c_to_use } };
+        _impl->asm_glue_workspace = manage_workspace<Tensor>(_impl->aux_mem_req, _impl->memory_group, _impl->asm_glue_run_pack, _impl->asm_glue_prep_pack);
 
         // Scale product by alpha
-        if(_run_alpha_scale)
+        if(_impl->run_alpha_scale)
         {
-            _alpha_scale_func.configure(d, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, alpha, 0.f));
+            _impl->alpha_scale_func = std::make_unique<cpu::CpuActivation>();
+            _impl->alpha_scale_func->configure(d->info(), nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, alpha, 0.f));
         }
     }
     else
     {
         // Pick output tensor in case bias addition should be performed
-        ITensor *gemm_output_to_use = d;
-        if(_run_bias_addition)
+        if(_impl->run_bias_addition)
         {
-            gemm_output_to_use = &_tmp_d;
-            _memory_group.manage(&_tmp_d);
+            _impl->gemm_output_to_use = &_impl->tmp_d;
+            _impl->memory_group.manage(&_impl->tmp_d);
         }
 
-        _mm_kernel = std::make_unique<NEGEMMMatrixMultiplyKernel>();
+        _impl->mm_kernel = std::make_unique<NEGEMMMatrixMultiplyKernel>();
 
         // Select between GEMV and GEMM
-        if(_run_vector_matrix_multiplication)
+        if(_impl->run_vector_matrix_multiplication)
         {
             // Configure the matrix multiply kernel
-            _mm_kernel->configure(a, b, gemm_output_to_use, alpha, false);
+            _impl->mm_kernel->configure(a, b, _impl->gemm_output_to_use, alpha, false);
         }
         else
         {
@@ -168,14 +190,14 @@
             TensorInfo info_a = a->info()->clone()->set_tensor_shape(shape_tmp_a).set_is_resizable(true);
             TensorInfo info_b = b->info()->clone()->set_tensor_shape(shape_tmp_b).set_is_resizable(true);
 
-            _tmp_a.allocator()->init(info_a);
-            _tmp_b.allocator()->init(info_b);
+            _impl->tmp_a.allocator()->init(info_a);
+            _impl->tmp_b.allocator()->init(info_b);
 
             // Manage intermediate buffers
-            _memory_group.manage(&_tmp_a);
-            if(!_reshape_b_only_on_first_run)
+            _impl->memory_group.manage(&_impl->tmp_a);
+            if(!_impl->reshape_b_only_on_first_run)
             {
-                _memory_group.manage(&_tmp_b);
+                _impl->memory_group.manage(&_impl->tmp_b);
             }
 
             int m = a->info()->dimension(1);
@@ -183,43 +205,45 @@
             int k = a->info()->dimension(0);
 
             // Configure interleave kernel
-            _interleave_kernel = std::make_unique<NEGEMMInterleave4x4Kernel>();
-            _interleave_kernel->configure(a, &_tmp_a);
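+            // New-API kernels are configured with ITensorInfo objects only; the concrete tensors are supplied later through ITensorPacks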
+            _impl->interleave_kernel = std::make_unique<cpu::kernels::CpuGemmInterleave4x4Kernel>();
+            _impl->interleave_kernel->configure(a->info(), _impl->tmp_a.info());
 
             // Configure transpose kernel
-            _transpose_kernel = std::make_unique<NEGEMMTranspose1xWKernel>();
-            _transpose_kernel->configure(b, &_tmp_b);
+            _impl->transpose_kernel = std::make_unique<cpu::kernels::CpuGemmTranspose1xWKernel>();
+            _impl->transpose_kernel->configure(b->info(), _impl->tmp_b.info());
 
             // Configure matrix multiplication kernel
-            _mm_kernel->configure(&_tmp_a, &_tmp_b, gemm_output_to_use, alpha, true, GEMMReshapeInfo(m, n, k));
+            _impl->mm_kernel->configure(&_impl->tmp_a, &_impl->tmp_b, _impl->gemm_output_to_use, alpha, true, GEMMReshapeInfo(m, n, k));
 
             // Allocate once the all configure methods have been called
-            _tmp_a.allocator()->allocate();
-            if(!_reshape_b_only_on_first_run)
+            _impl->tmp_a.allocator()->allocate();
+            if(!_impl->reshape_b_only_on_first_run)
             {
-                _tmp_b.allocator()->allocate();
+                _impl->tmp_b.allocator()->allocate();
             }
         }
 
-        if(_run_bias_addition)
+        if(_impl->run_bias_addition)
         {
-            _add_bias.configure(gemm_output_to_use, c, d, ConvertPolicy::SATURATE);
-            _tmp_d.allocator()->allocate();
+            _impl->add_bias = std::make_unique<cpu::CpuAdd>();
+            _impl->add_bias->configure(_impl->gemm_output_to_use->info(), c->info(), d->info(), ConvertPolicy::SATURATE);
+            _impl->tmp_d.allocator()->allocate();
         }
     }
 
     // Configure matrix addition kernel
-    if(_run_addition)
+    if(_impl->run_addition)
     {
-        _ma_kernel = std::make_unique<NEGEMMMatrixAdditionKernel>();
-        _ma_kernel->configure(c, d, beta);
+        _impl->ma_kernel = std::make_unique<cpu::kernels::CpuGemmMatrixAdditionKernel>();
+        _impl->ma_kernel->configure(c->info(), d->info(), beta);
     }
 
     // Configure activation
     const ActivationLayerInfo &activation = gemm_info.activation_info();
-    if(_run_activation)
+    if(_impl->run_activation)
     {
-        _activation_func.configure(d, nullptr, activation);
+        _impl->activation_func = std::make_unique<cpu::CpuActivation>();
+        _impl->activation_func->configure(d->info(), nullptr, activation);
     }
 }
 
@@ -285,7 +309,7 @@
         const bool run_interleave_transpose = !run_vector_matrix_multiplication && !(gemm_info.reshape_b_only_on_first_run());
 
         // Arguments used by GEMMReshapeInfo
-        // If we pass the matrix A and matrix B reshaped to NEGEMMMatrixMultiplyKernel, we need to pass m, n, k, mult_transpose1xW_width and mult_interleave4x4_height to NEGEMMReshapeInfo
+        // If we pass the matrix A and matrix B reshaped to NEGEMMMatrixMultiplyKernel, we need to pass m, n, k, mult_transpose1xW_width and mult_interleave4x4_height to GEMMReshapeInfo
         // in order to know how the matrices have been reshaped
         const int m                         = a->dimension(1);
         const int n                         = b->dimension(0);
@@ -309,11 +333,11 @@
 
             // Validate interleave kernel
             auto_init_if_empty(tmp_a_info, a->clone()->set_tensor_shape(compute_interleaved_shape(*a, mult_interleave4x4_height, gemm_info.reinterpret_input_as_3d())));
-            ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMInterleave4x4Kernel::validate(a, &tmp_a_info));
+            ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmInterleave4x4Kernel::validate(a, &tmp_a_info));
 
             // Validate transpose kernel
             auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(compute_transpose1xW_with_element_size_shape(*b, mult_transpose1xW_width)));
-            ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMTranspose1xWKernel::validate(b, &tmp_b_info));
+            ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmTranspose1xWKernel::validate(b, &tmp_b_info));
         }
 
         // Validate matrix multiply
@@ -322,21 +346,21 @@
 
         if(c != nullptr && gemm_info.reshape_b_only_on_first_run())
         {
-            ARM_COMPUTE_RETURN_ON_ERROR(NEArithmeticAddition::validate(&tmp_output_info, c, output, ConvertPolicy::SATURATE));
+            ARM_COMPUTE_RETURN_ON_ERROR(cpu::CpuAdd::validate(&tmp_output_info, c, output, ConvertPolicy::SATURATE));
         }
     }
 
     // Validate matrix addition kernel
     if(beta != 0 && c != nullptr && !is_c_bias)
     {
-        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMMatrixAdditionKernel::validate(c, output, beta));
+        ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmMatrixAdditionKernel::validate(c, output, beta));
     }
 
     // Validate activation
     const ActivationLayerInfo &activation = gemm_info.activation_info();
     if(activation.enabled())
     {
-        ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, activation));
+        ARM_COMPUTE_RETURN_ON_ERROR(cpu::CpuActivation::validate(output, nullptr, activation));
     }
 
     return Status{};
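
Because the ported kernels validate on ITensorInfo alone, the whole validate() path above is a metadata-only dry run. A hedged standalone example (shapes and data type are illustrative):

    // No allocations: only shapes/types are checked, mirroring validate() above.
    TensorInfo src(TensorShape(23U, 47U), 1, DataType::F32);
    TensorInfo dst(compute_interleaved_shape(src), 1, DataType::F32);
    const Status st = cpu::kernels::CpuGemmInterleave4x4Kernel::validate(&src, &dst);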
@@ -346,90 +370,97 @@
 {
     prepare();
 
-    MemoryGroupResourceScope scope_mg(_memory_group);
+    MemoryGroupResourceScope scope_mg(_impl->memory_group);
 
-    if(_asm_glue->is_configured())
+    if(_impl->asm_glue->is_configured())
     {
-        _asm_glue->run(_asm_glue_run_pack);
-        if(_run_alpha_scale)
+        _impl->asm_glue->run(_impl->asm_glue_run_pack);
+        if(_impl->run_alpha_scale)
         {
-            _alpha_scale_func.run();
+            ITensorPack pack{ { ACL_SRC, _impl->d }, { ACL_DST, _impl->d } };
+            _impl->alpha_scale_func->run(pack);
         }
     }
     else
     {
-        if(!_run_vector_matrix_multiplication)
+        if(!_impl->run_vector_matrix_multiplication)
         {
             // Run interleave kernel
-            NEScheduler::get().schedule(_interleave_kernel.get(), Window::DimY);
+            ITensorPack interleave_pack{ { ACL_SRC, _impl->a }, { ACL_DST, &_impl->tmp_a } };
+            NEScheduler::get().schedule_op(_impl->interleave_kernel.get(), Window::DimY, _impl->interleave_kernel->window(), interleave_pack);
 
-            if(!_reshape_b_only_on_first_run)
+            if(!_impl->reshape_b_only_on_first_run)
             {
                 // Run transpose kernel
-                NEScheduler::get().schedule(_transpose_kernel.get(), Window::DimY);
+                ITensorPack transpose_pack{ { ACL_SRC, _impl->original_b }, { ACL_DST, &_impl->tmp_b } };
+                NEScheduler::get().schedule_op(_impl->transpose_kernel.get(), Window::DimY, _impl->transpose_kernel->window(), transpose_pack);
             }
         }
 
-        NEScheduler::get().schedule(_mm_kernel.get(), _run_vector_matrix_multiplication ? Window::DimX : Window::DimY);
+        NEScheduler::get().schedule(_impl->mm_kernel.get(), _impl->run_vector_matrix_multiplication ? Window::DimX : Window::DimY);
 
         // Run bias addition kernel
-        if(_run_bias_addition)
+        if(_impl->run_bias_addition)
         {
-            _add_bias.run();
+            ITensorPack pack{ { ACL_SRC_0, _impl->gemm_output_to_use }, { ACL_SRC_1, _impl->c }, { ACL_DST, _impl->d } };
+            _impl->add_bias->run(pack);
         }
     }
 
     // Run matrix addition kernel
-    if(_run_addition)
+    if(_impl->run_addition)
     {
-        NEScheduler::get().schedule(_ma_kernel.get(), Window::DimY);
+        ITensorPack c_add_pack{ { ACL_SRC, _impl->c }, { ACL_DST, _impl->d } };
+        NEScheduler::get().schedule_op(_impl->ma_kernel.get(), Window::DimY, _impl->ma_kernel->window(), c_add_pack);
     }
 
     // Run activation function
-    if(_run_activation)
+    if(_impl->run_activation)
     {
-        _activation_func.run();
+        ITensorPack pack{ { ACL_SRC, _impl->d }, { ACL_DST, _impl->d } };
+        _impl->activation_func->run(pack);
     }
 }
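
Note how run() expresses in-place execution: the fused alpha scale and the activation both bind d to ACL_SRC and ACL_DST in the same pack. A standalone sketch of that pattern (the RELU info is illustrative, not from this patch):

    cpu::CpuActivation act;
    act.configure(d.info(), nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)); // nullptr dst => in-place
    ITensorPack pack{ { ACL_SRC, &d }, { ACL_DST, &d } };
    act.run(pack);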
 
 void NEGEMM::prepare()
 {
-    if(!_is_prepared)
+    if(!_impl->is_prepared)
     {
-        const bool original_b_managed_by_weights_manager = _weights_manager && _weights_manager->are_weights_managed(_original_b);
-        if(_asm_glue->is_configured())
+        const bool original_b_managed_by_weights_manager = _impl->weights_manager && _impl->weights_manager->are_weights_managed(_impl->original_b);
+        if(_impl->asm_glue->is_configured())
         {
-            _asm_glue->prepare(_asm_glue_prep_pack);
+            _impl->asm_glue->prepare(_impl->asm_glue_prep_pack);
 
-            auto has_reshape = std::find_if(_aux_mem_req.begin(),
-                                            _aux_mem_req.end(),
+            auto has_reshape = std::find_if(_impl->aux_mem_req.begin(),
+                                            _impl->aux_mem_req.end(),
                                             [](const MemoryInfo & m) -> bool { return m.lifetime == MemoryLifetime::Persistent; });
 
-            if(has_reshape != std::end(_aux_mem_req))
+            if(has_reshape != std::end(_impl->aux_mem_req))
             {
-                _original_b->mark_as_unused();
+                _impl->original_b->mark_as_unused();
             }
             else
             {
-                _asm_glue_run_pack.add_const_tensor(ACL_SRC_1, _original_b);
+                _impl->asm_glue_run_pack.add_const_tensor(ACL_SRC_1, _impl->original_b);
             }
         }
-        else if(_reshape_b_only_on_first_run && !_run_vector_matrix_multiplication && !_asm_glue->is_configured())
+        else if(_impl->reshape_b_only_on_first_run && !_impl->run_vector_matrix_multiplication && !_impl->asm_glue->is_configured())
         {
             if(!original_b_managed_by_weights_manager)
             {
-                ARM_COMPUTE_ERROR_ON(!_original_b->is_used());
+                ARM_COMPUTE_ERROR_ON(!_impl->original_b->is_used());
             }
 
-            _tmp_b.allocator()->allocate();
-            NEScheduler::get().schedule(_transpose_kernel.get(), Window::DimY);
+            _impl->tmp_b.allocator()->allocate();
+            ITensorPack transpose_pack{ { ACL_SRC, _impl->original_b }, { ACL_DST, &_impl->tmp_b } };
+            NEScheduler::get().schedule_op(_impl->transpose_kernel.get(), Window::DimY, _impl->transpose_kernel->window(), transpose_pack);
             if(!original_b_managed_by_weights_manager)
             {
-                _original_b->mark_as_unused();
+                _impl->original_b->mark_as_unused();
             }
         }
 
-        _is_prepared = true;
+        _impl->is_prepared = true;
     }
 }
 } // namespace arm_compute
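
prepare() above keeps the usual one-shot weight-reshape contract: on the first execution B is transposed into tmp_b and the original weights are released. A condensed sketch of that path (simplified from the code above, not a drop-in replacement):

    if(!_impl->is_prepared && _impl->reshape_b_only_on_first_run)
    {
        _impl->tmp_b.allocator()->allocate();
        ITensorPack pack{ { ACL_SRC, _impl->original_b }, { ACL_DST, &_impl->tmp_b } };
        NEScheduler::get().schedule_op(_impl->transpose_kernel.get(), Window::DimY, _impl->transpose_kernel->window(), pack);
        _impl->original_b->mark_as_unused(); // the transposed copy is now the only consumer of B
        _impl->is_prepared = true;
    }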
diff --git a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
index 2876c25..f40cbda 100644
--- a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
@@ -31,16 +31,6 @@
 #include "arm_compute/runtime/NEON/NEScheduler.h"
 
 #include "src/core/NEON/kernels/NECol2ImKernel.h"
-#include "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h"
-#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
-#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
 #include "src/core/NEON/kernels/NEIm2ColKernel.h"
 #include "src/core/NEON/kernels/NEWeightsReshapeKernel.h"
 
diff --git a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
index 224fb1e..0aba3c0 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
@@ -41,12 +41,12 @@
 #include "arm_compute/runtime/MemoryGroup.h"
 #include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
 #include "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h"
-#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
 #include "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
 #include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"
 #include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"
 #include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
+#include "src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h"
+#include "src/core/cpu/kernels/CpuGemmTranspose1xWKernel.h"
 #include "src/runtime/cpu/operators/internal/CpuGemmAssemblyDispatch.h"
 
 namespace arm_compute
@@ -72,8 +72,8 @@
     IWeightsManager                                               *weights_manager{ nullptr };
     std::unique_ptr<cpu::CpuGemmAssemblyDispatch>                  asm_glue{ nullptr };
     std::unique_ptr<NEGEMMLowpMatrixMultiplyKernel>                mm_kernel{ nullptr };
-    std::unique_ptr<NEGEMMInterleave4x4Kernel>                     mtx_a_reshape_kernel{ nullptr };
-    std::unique_ptr<NEGEMMTranspose1xWKernel>                      mtx_b_reshape_kernel{ nullptr };
+    std::unique_ptr<cpu::kernels::CpuGemmInterleave4x4Kernel>      mtx_a_reshape_kernel{ nullptr };
+    std::unique_ptr<cpu::kernels::CpuGemmTranspose1xWKernel>       mtx_b_reshape_kernel{ nullptr };
     std::unique_ptr<NEGEMMLowpMatrixAReductionKernel>              mtx_a_reduction_kernel{ nullptr };
     std::unique_ptr<NEGEMMLowpMatrixBReductionKernel>              mtx_b_reduction_kernel{ nullptr };
     std::unique_ptr<NEGEMMLowpOffsetContributionKernel>            offset_contribution_kernel{ nullptr };
@@ -82,6 +82,7 @@
     std::unique_ptr<NEConvertQuantizedSignednessKernel>            convert_to_signed_asymm{ nullptr };
     std::unique_ptr<NEConvertQuantizedSignednessKernel>            convert_from_signed_asymm{ nullptr };
 
+    const ITensor *a_to_use{ nullptr };
     Tensor         vector_sum_col{};
     Tensor         vector_sum_row{};
     Tensor         tmp_a{};
@@ -142,20 +143,20 @@
 
     _impl->asm_glue = std::make_unique<cpu::CpuGemmAssemblyDispatch>();
 
-    const ITensor *a_to_use = a;
+    _impl->a_to_use = a;
 
     // Convert to QASYMM8 -> QASYMM8_SIGNED and back
     if(_impl->flip_signedness)
     {
         const int32_t                 offset_correction = 128;
         const DataType                dt                = DataType::QASYMM8_SIGNED;
-        const UniformQuantizationInfo iqinfo            = a_to_use->info()->quantization_info().uniform();
+        const UniformQuantizationInfo iqinfo            = _impl->a_to_use->info()->quantization_info().uniform();
 
-        _impl->signed_a.allocator()->init(a_to_use->info()->clone()->set_data_type(dt).set_quantization_info(QuantizationInfo(iqinfo.scale, iqinfo.offset + offset_correction)));
+        _impl->signed_a.allocator()->init(_impl->a_to_use->info()->clone()->set_data_type(dt).set_quantization_info(QuantizationInfo(iqinfo.scale, iqinfo.offset + offset_correction)));
         _impl->memory_group.manage(&_impl->signed_a);
         _impl->convert_to_signed_asymm = std::make_unique<NEConvertQuantizedSignednessKernel>();
-        _impl->convert_to_signed_asymm->configure(a_to_use, &_impl->signed_a);
-        a_to_use        = &_impl->signed_a;
+        _impl->convert_to_signed_asymm->configure(_impl->a_to_use, &_impl->signed_a);
+        _impl->a_to_use = &_impl->signed_a;
         _impl->a_offset = _impl->signed_a.info()->quantization_info().uniform().offset;
 
         const UniformQuantizationInfo oqinfo = output->info()->quantization_info().uniform();
@@ -192,10 +193,10 @@
         case DataType::U8:
         case DataType::S8:
         {
-            if(is_data_type_quantized_asymmetric(a_to_use->info()->data_type()) && info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
+            if(is_data_type_quantized_asymmetric(_impl->a_to_use->info()->data_type()) && info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
             {
                 auto c_info_to_use = c == nullptr ? nullptr : c->info();
-                _impl->asm_glue->configure(a_to_use->info(), b->info(), c_info_to_use, output->info(), asm_info);
+                _impl->asm_glue->configure(_impl->a_to_use->info(), b->info(), c_info_to_use, output->info(), asm_info);
                 _impl->fused_assembly_path = _impl->asm_glue->is_configured();
                 _impl->asm_glue_run_pack.add_const_tensor(TensorType::ACL_SRC_2, c);
                 _impl->asm_glue_run_pack.add_tensor(TensorType::ACL_DST, output);
@@ -203,14 +204,14 @@
             else
             {
                 auto output_to_use = (_impl->fuse_output_stage ? &_impl->mm_result_s32 : output);
-                _impl->asm_glue->configure(a_to_use->info(), b->info(), nullptr, output_to_use->info(), asm_info);
+                _impl->asm_glue->configure(_impl->a_to_use->info(), b->info(), nullptr, output_to_use->info(), asm_info);
                 _impl->asm_glue_run_pack.add_tensor(TensorType::ACL_DST, output_to_use);
             }
             _impl->assembly_path = _impl->asm_glue->is_configured();
 
             if(_impl->assembly_path)
             {
-                _impl->asm_glue_run_pack.add_const_tensor(TensorType::ACL_SRC_0, a_to_use);
+                _impl->asm_glue_run_pack.add_const_tensor(TensorType::ACL_SRC_0, _impl->a_to_use);
 
                 _impl->aux_mem_req        = _impl->asm_glue->workspace();
                 _impl->asm_glue_prep_pack = { { TensorType::ACL_SRC_1, b }, { TensorType::ACL_SRC_2, c } };
@@ -232,7 +233,7 @@
         matrix_b = &_impl->tmp_b;
 
         // The interleaved output matrix will have the following shape: [ a_height * 4, ceil(a_width / 4.0f) ]
-        TensorInfo a_info(compute_interleaved_shape(*a_to_use->info()), 1, a_to_use->info()->data_type(), a_to_use->info()->quantization_info());
+        TensorInfo a_info(compute_interleaved_shape(*_impl->a_to_use->info()), 1, _impl->a_to_use->info()->data_type(), _impl->a_to_use->info()->quantization_info());
         // The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width / 16.0f) ]
         TensorInfo b_info(compute_transpose1xW_shape(*b->info()), 1, b->info()->data_type(), b->info()->quantization_info());
         _impl->tmp_a.allocator()->init(a_info);
@@ -244,18 +245,18 @@
         }
 
         // Configure interleave kernel
-        _impl->mtx_a_reshape_kernel = std::make_unique<NEGEMMInterleave4x4Kernel>();
-        _impl->mtx_a_reshape_kernel->configure(a_to_use, &_impl->tmp_a);
+        _impl->mtx_a_reshape_kernel = std::make_unique<cpu::kernels::CpuGemmInterleave4x4Kernel>();
+        _impl->mtx_a_reshape_kernel->configure(_impl->a_to_use->info(), _impl->tmp_a.info());
 
         // Configure transpose kernel
-        _impl->mtx_b_reshape_kernel = std::make_unique<NEGEMMTranspose1xWKernel>();
-        _impl->mtx_b_reshape_kernel->configure(b, &_impl->tmp_b);
+        _impl->mtx_b_reshape_kernel = std::make_unique<cpu::kernels::CpuGemmTranspose1xWKernel>();
+        _impl->mtx_b_reshape_kernel->configure(b->info(), _impl->tmp_b.info());
     }
 
     if(!_impl->fused_assembly_path)
     {
         // Build reduction info
-        const GEMMLowpReductionKernelInfo reduction_info(a_to_use->info()->dimension(0), false, 0, false);
+        const GEMMLowpReductionKernelInfo reduction_info(_impl->a_to_use->info()->dimension(0), false, 0, false);
 
         // Initialize matrix B reduction kernel only if _impl->a_offset is not equal to 0
         if(_impl->a_offset != 0)
@@ -276,14 +277,14 @@
         // Initialize Matrix A reduction kernel only if _impl->b_offset is not equal to 0
         if(_impl->b_offset != 0)
         {
-            TensorInfo info_vector_sum_row(compute_reductionB_shape(*a_to_use->info()), 1, DataType::S32);
+            TensorInfo info_vector_sum_row(compute_reductionB_shape(*_impl->a_to_use->info()), 1, DataType::S32);
 
             _impl->vector_sum_row.allocator()->init(info_vector_sum_row);
             _impl->memory_group.manage(&_impl->vector_sum_row);
 
             // Configure matrix A reduction kernel
             _impl->mtx_a_reduction_kernel = std::make_unique<NEGEMMLowpMatrixAReductionKernel>();
-            _impl->mtx_a_reduction_kernel->configure(a_to_use, &_impl->vector_sum_row, reduction_info);
+            _impl->mtx_a_reduction_kernel->configure(_impl->a_to_use, &_impl->vector_sum_row, reduction_info);
         }
 
         if(_impl->fuse_output_stage)
@@ -319,7 +320,8 @@
             }
             // Configure offset contribution kernel
             _impl->offset_contribution_kernel = std::make_unique<NEGEMMLowpOffsetContributionKernel>();
-            _impl->offset_contribution_kernel->configure(output, _impl->a_offset == 0 ? nullptr : &_impl->vector_sum_col, _impl->b_offset == 0 ? nullptr : &_impl->vector_sum_row, a_to_use->info()->dimension(0),
+            _impl->offset_contribution_kernel->configure(output, _impl->a_offset == 0 ? nullptr : &_impl->vector_sum_col, _impl->b_offset == 0 ? nullptr : &_impl->vector_sum_row,
+                                                         _impl->a_to_use->info()->dimension(0),
                                                          _impl->a_offset, _impl->b_offset);
         }
     }
@@ -487,8 +489,8 @@
             auto_init_if_empty(tmp_a_info, a_to_use->clone()->set_tensor_shape(shape_tmp_a));
             auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(shape_tmp_b));
 
-            ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMInterleave4x4Kernel::validate(a_to_use, &tmp_a_info));
-            ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMTranspose1xWKernel::validate(b, &tmp_b_info));
+            ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmInterleave4x4Kernel::validate(a_to_use, &tmp_a_info));
+            ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmTranspose1xWKernel::validate(b, &tmp_b_info));
         }
     }
 
@@ -585,12 +587,14 @@
         if(!_impl->run_vector_matrix_multiplication)
         {
             // Run interleave kernel
-            NEScheduler::get().schedule(_impl->mtx_a_reshape_kernel.get(), Window::DimY);
+            ITensorPack interleave_pack{ { ACL_SRC, _impl->a_to_use }, { ACL_DST, &_impl->tmp_a } };
+            NEScheduler::get().schedule_op(_impl->mtx_a_reshape_kernel.get(), Window::DimY, _impl->mtx_a_reshape_kernel->window(), interleave_pack);
 
             if(!_impl->reshape_b_only_on_first_run)
             {
                 // Run transpose kernel
-                NEScheduler::get().schedule(_impl->mtx_b_reshape_kernel.get(), Window::DimY);
+                ITensorPack reshape_b_pack{ { ACL_SRC, _impl->original_b }, { ACL_DST, &_impl->tmp_b } };
+                NEScheduler::get().schedule_op(_impl->mtx_b_reshape_kernel.get(), Window::DimY, _impl->mtx_b_reshape_kernel->window(), reshape_b_pack);
             }
         }
         NEScheduler::get().schedule(_impl->mm_kernel.get(), Window::DimY);
@@ -662,7 +666,8 @@
         {
             // Run reshape kernel and mark original weights tensor as unused
             _impl->tmp_b.allocator()->allocate();
-            NEScheduler::get().schedule(_impl->mtx_b_reshape_kernel.get(), Window::DimY);
+            ITensorPack reshape_b_pack{ { ACL_SRC, _impl->original_b }, { ACL_DST, &_impl->tmp_b } };
+            NEScheduler::get().schedule_op(_impl->mtx_b_reshape_kernel.get(), Window::DimY, _impl->mtx_b_reshape_kernel->window(), reshape_b_pack);
         }
 
         // Run matrix B reduction kernel only if _impl->a_offset is not equal to 0
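
Promoting a_to_use from a configure()-local to an Impl member is what allows run() to rebuild the interleave pack on every execution, pointing at either the original input or the sign-converted copy. Sketch, using the member names from this patch:

    // In Impl: const ITensor *a_to_use{ nullptr }; set by configure() to either
    // a or &signed_a after the QASYMM8 -> QASYMM8_SIGNED flip.
    ITensorPack interleave_pack{ { ACL_SRC, _impl->a_to_use }, { ACL_DST, &_impl->tmp_a } };
    NEScheduler::get().schedule_op(_impl->mtx_a_reshape_kernel.get(), Window::DimY,
                                   _impl->mtx_a_reshape_kernel->window(), interleave_pack);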
diff --git a/src/runtime/NEON/functions/NELSTMLayer.cpp b/src/runtime/NEON/functions/NELSTMLayer.cpp
index d338e4f..2eb5e9a 100644
--- a/src/runtime/NEON/functions/NELSTMLayer.cpp
+++ b/src/runtime/NEON/functions/NELSTMLayer.cpp
@@ -29,15 +29,6 @@
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
 #include "arm_compute/runtime/common/LSTMParams.h"
-#include "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h"
-#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
 
 namespace arm_compute
 {
diff --git a/src/runtime/NEON/functions/NELSTMLayerQuantized.cpp b/src/runtime/NEON/functions/NELSTMLayerQuantized.cpp
index a56e168..eb7d584 100644
--- a/src/runtime/NEON/functions/NELSTMLayerQuantized.cpp
+++ b/src/runtime/NEON/functions/NELSTMLayerQuantized.cpp
@@ -26,15 +26,6 @@
 #include "arm_compute/core/Utils.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
-#include "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h"
-#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
 #include "src/core/helpers/AutoConfiguration.h"
 
 #include <cmath>
diff --git a/src/runtime/NEON/functions/NEQLSTMLayer.cpp b/src/runtime/NEON/functions/NEQLSTMLayer.cpp
index 85d62ac..f3a3d23 100644
--- a/src/runtime/NEON/functions/NEQLSTMLayer.cpp
+++ b/src/runtime/NEON/functions/NEQLSTMLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -31,12 +31,10 @@
 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
 #include "arm_compute/runtime/NEON/NEScheduler.h"
 #include "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h"
-#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
 #include "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
 #include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"
 #include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"
 #include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
 #include "src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.h"
 #include "src/core/helpers/WindowHelpers.h"
 
diff --git a/src/runtime/NEON/functions/NERNNLayer.cpp b/src/runtime/NEON/functions/NERNNLayer.cpp
index d59f7da..6f6d4d9 100644
--- a/src/runtime/NEON/functions/NERNNLayer.cpp
+++ b/src/runtime/NEON/functions/NERNNLayer.cpp
@@ -30,15 +30,6 @@
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h"
-#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
 
 namespace arm_compute
 {
diff --git a/tests/validation/NEON/GEMM.cpp b/tests/validation/NEON/GEMM.cpp
index 500c602..ddd1bca 100644
--- a/tests/validation/NEON/GEMM.cpp
+++ b/tests/validation/NEON/GEMM.cpp
@@ -25,9 +25,9 @@
 #include "arm_compute/runtime/NEON/functions/NEGEMM.h"
 #include "arm_compute/runtime/Tensor.h"
 #include "arm_compute/runtime/TensorAllocator.h"
-#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
 #include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
+#include "src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h"
+#include "src/core/cpu/kernels/CpuGemmTranspose1xWKernel.h"
 #include "tests/NEON/Accessor.h"
 #include "tests/NEON/Helper.h"
 #include "tests/PaddingCalculator.h"
@@ -88,6 +88,27 @@
     return in.info()->padding().empty();
 }
 
+/** Zero padding test
+ *
+ * TODO(COMPMID-4402): merge with the previous helper once all kernels have been ported
+ */
+template <typename FunctionType>
+bool validate_zero_padding_new(unsigned int dim0_value, unsigned int dim1_value)
+{
+    const TensorShape in_shape(dim0_value, dim1_value);
+    TensorInfo        in(in_shape, 1, DataType::U32);
+    TensorInfo        dst;
+
+    ARM_COMPUTE_EXPECT(in.is_resizable(), framework::LogLevel::ERRORS);
+
+    // Validate zero-padding
+    FunctionType func;
+
+    func.configure(&in, &dst);
+
+    return in.padding().empty();
+}
+
 /* Zero padding test for GEMM kernels */
 bool validate_gemm_zero_padding(const TensorShape shape0, const TensorShape shape1)
 {
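
The new helper drives configure() on TensorInfo directly, so no backing tensors are needed. Example instantiation (a hypothetical standalone call; the data tests below drive it with datasets):

    bool ok = validate_zero_padding_new<cpu::kernels::CpuGemmInterleave4x4Kernel>(23, 47);
    ARM_COMPUTE_EXPECT(ok, framework::LogLevel::ERRORS);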
@@ -108,19 +129,19 @@
 TEST_SUITE(GEMM)
 
 TEST_SUITE(TRANSPOSE_1XW)
-using NEGEMMTranspose1xW = NESynthetizeFunctionWithZeroConstantBorder<NEGEMMTranspose1xWKernel, 4>;
+using CpuGemmTranspose1xW = NESynthetizeFunctionWithZeroConstantKernelBorder<cpu::kernels::CpuGemmTranspose1xWKernel>;
 DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(
                    framework::dataset::make("N", { 1, 23, 63, 101 }),
                    framework::dataset::make("K", { 1, 47, 29, 27 })),
                n_value, k_value)
 {
-    bool status = validate_zero_padding<NEGEMMTranspose1xWKernel>(n_value, k_value);
+    bool status = validate_zero_padding_new<CpuGemmTranspose1xW>(n_value, k_value);
     ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
 }
 
 TEST_SUITE(U32)
-using NEGEMMTranspose1xWFixture = GEMMTranspose1xWValidationFixture<Tensor, Accessor, NEGEMMTranspose1xW, uint32_t>;
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMTranspose1xWFixture, framework::DatasetMode::PRECOMMIT, data_transpose * framework::dataset::make("DataType", DataType::U32))
+using CpuGemmTranspose1xWFixture = GEMMTranspose1xWValidationFixture<Tensor, Accessor, CpuGemmTranspose1xW, uint32_t>;
+FIXTURE_DATA_TEST_CASE(RunSmall, CpuGemmTranspose1xWFixture, framework::DatasetMode::PRECOMMIT, data_transpose * framework::dataset::make("DataType", DataType::U32))
 {
     // Validate output
     validate(Accessor(_target), _reference);
@@ -128,8 +149,8 @@
 TEST_SUITE_END() // U32
 
 TEST_SUITE(U16)
-using NEGEMMTranspose1xWFixture = GEMMTranspose1xWValidationFixture<Tensor, Accessor, NEGEMMTranspose1xW, uint16_t>;
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMTranspose1xWFixture, framework::DatasetMode::PRECOMMIT, data_transpose * framework::dataset::make("DataType", DataType::U16))
+using CpuGemmTranspose1xWFixture = GEMMTranspose1xWValidationFixture<Tensor, Accessor, CpuGemmTranspose1xW, uint16_t>;
+FIXTURE_DATA_TEST_CASE(RunSmall, CpuGemmTranspose1xWFixture, framework::DatasetMode::PRECOMMIT, data_transpose * framework::dataset::make("DataType", DataType::U16))
 {
     // Validate output
     validate(Accessor(_target), _reference);
@@ -137,8 +158,8 @@
 TEST_SUITE_END() // U16
 
 TEST_SUITE(U8)
-using NEGEMMTranspose1xWFixture = GEMMTranspose1xWValidationFixture<Tensor, Accessor, NEGEMMTranspose1xW, uint8_t>;
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMTranspose1xWFixture, framework::DatasetMode::PRECOMMIT, data_transpose * framework::dataset::make("DataType", DataType::U8))
+using CpuGemmTranspose1xWFixture = GEMMTranspose1xWValidationFixture<Tensor, Accessor, CpuGemmTranspose1xW, uint8_t>;
+FIXTURE_DATA_TEST_CASE(RunSmall, CpuGemmTranspose1xWFixture, framework::DatasetMode::PRECOMMIT, data_transpose * framework::dataset::make("DataType", DataType::U8))
 {
     // Validate output
     validate(Accessor(_target), _reference);
@@ -148,20 +169,20 @@
 TEST_SUITE_END() // TRANSPOSE_1XW
 
 TEST_SUITE(INTERLEAVE_4X4)
-using NEGEMMInterleave4x4 = NESynthetizeFunctionWithZeroConstantBorder<NEGEMMInterleave4x4Kernel, 4>;
+using CpuGemmInterleave4x4 = NESynthetizeFunctionWithZeroConstantKernelBorder<cpu::kernels::CpuGemmInterleave4x4Kernel>;
 
 DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(
                    framework::dataset::make("M", { 1, 23, 63, 101 }),
                    framework::dataset::make("K", { 1, 47, 29, 27 })),
                m_value, k_value)
 {
-    bool status = validate_zero_padding<NEGEMMInterleave4x4Kernel>(m_value, k_value);
+    bool status = validate_zero_padding_new<cpu::kernels::CpuGemmInterleave4x4Kernel>(m_value, k_value);
     ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
 }
 
 TEST_SUITE(U32)
-using NEGEMMInterleave4x4Fixture = GEMMInterleave4x4ValidationFixture<Tensor, Accessor, NEGEMMInterleave4x4, uint32_t>;
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMInterleave4x4Fixture, framework::DatasetMode::PRECOMMIT, data_interleave * framework::dataset::make("DataType", DataType::U32))
+using CpuGemmInterleave4x4Fixture = GEMMInterleave4x4ValidationFixture<Tensor, Accessor, CpuGemmInterleave4x4, uint32_t>;
+FIXTURE_DATA_TEST_CASE(RunSmall, CpuGemmInterleave4x4Fixture, framework::DatasetMode::PRECOMMIT, data_interleave * framework::dataset::make("DataType", DataType::U32))
 {
     // Validate output
     validate(Accessor(_target), _reference);
@@ -169,8 +190,8 @@
 TEST_SUITE_END() // U32
 
 TEST_SUITE(U16)
-using NEGEMMInterleave4x4Fixture = GEMMInterleave4x4ValidationFixture<Tensor, Accessor, NEGEMMInterleave4x4, uint16_t>;
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMInterleave4x4Fixture, framework::DatasetMode::PRECOMMIT, data_interleave * framework::dataset::make("DataType", DataType::U16))
+using CpuGemmInterleave4x4Fixture = GEMMInterleave4x4ValidationFixture<Tensor, Accessor, CpuGemmInterleave4x4, uint16_t>;
+FIXTURE_DATA_TEST_CASE(RunSmall, CpuGemmInterleave4x4Fixture, framework::DatasetMode::PRECOMMIT, data_interleave * framework::dataset::make("DataType", DataType::U16))
 {
     // Validate output
     validate(Accessor(_target), _reference);
@@ -178,8 +199,8 @@
 TEST_SUITE_END() // U16
 
 TEST_SUITE(U8)
-using NEGEMMInterleave4x4Fixture = GEMMInterleave4x4ValidationFixture<Tensor, Accessor, NEGEMMInterleave4x4, uint8_t>;
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMInterleave4x4Fixture, framework::DatasetMode::PRECOMMIT, data_interleave * framework::dataset::make("DataType", DataType::QASYMM8))
+using CpuGemmInterleave4x4Fixture = GEMMInterleave4x4ValidationFixture<Tensor, Accessor, CpuGemmInterleave4x4, uint8_t>;
+FIXTURE_DATA_TEST_CASE(RunSmall, CpuGemmInterleave4x4Fixture, framework::DatasetMode::PRECOMMIT, data_interleave * framework::dataset::make("DataType", DataType::QASYMM8))
 {
     // Validate output
     validate(Accessor(_target), _reference);
diff --git a/tests/validation/fixtures/GEMMInterleave4x4Fixture.h b/tests/validation/fixtures/GEMMInterleave4x4Fixture.h
index 44dc0dd..72cea8c 100644
--- a/tests/validation/fixtures/GEMMInterleave4x4Fixture.h
+++ b/tests/validation/fixtures/GEMMInterleave4x4Fixture.h
@@ -88,7 +88,7 @@
 
         // Create and configure function
         FunctionType f;
-        f.configure(&a, &b);
+        f.configure(a.info(), b.info());
 
         ARM_COMPUTE_ASSERT(a.info()->is_resizable());
         ARM_COMPUTE_ASSERT(b.info()->is_resizable());
@@ -104,8 +104,9 @@
         fill(AccessorType(a), 0);
         fill(AccessorType(b), 0);
 
-        // Compute GEMM function
-        f.run();
+        // Compute GEMM interleave kernel
+        ITensorPack tensors{ { ACL_SRC, &a }, { ACL_DST, &b } };
+        f.run(tensors);
         return b;
     }
 
diff --git a/tests/validation/fixtures/GEMMTranspose1xWFixture.h b/tests/validation/fixtures/GEMMTranspose1xWFixture.h
index 7caea1d..08f478f 100644
--- a/tests/validation/fixtures/GEMMTranspose1xWFixture.h
+++ b/tests/validation/fixtures/GEMMTranspose1xWFixture.h
@@ -89,7 +89,7 @@
 
         // Create and configure function
         FunctionType f;
-        f.configure(&a, &b);
+        f.configure(a.info(), b.info());
 
         ARM_COMPUTE_ASSERT(a.info()->is_resizable());
         ARM_COMPUTE_ASSERT(b.info()->is_resizable());
@@ -106,7 +106,8 @@
         fill(AccessorType(b), 1);
 
-        // Compute GEMM function
-        f.run();
+        // Compute GEMM transpose kernel
+        ITensorPack tensors{ { ACL_SRC, &a }, { ACL_DST, &b } };
+        f.run(tensors);
 
         return b;
     }