Port NEGEMMLowp Part 2

Details:
Port NEConvertQuantizedSignednessKernel to CpuConvertQuantizedSignednessKernel
Port NEGEMMInterleave4x4Kernel to CpuGemmInterleave4x4Kernel
Port NEGEMMTranspose1xWKernel to CpuGemmTranspose1xWKernel
Port NEGEMMLowpMatrixAReductionKernel to CpuGemmLowpMatrixAReductionKernel
Port NEGEMMLowpMatrixBReductionKernel to CpuGemmLowpMatrixBReductionKernel
Port NEGEMMLowpOffsetContributionOutputStageKernel to CpuGemmLowpOffsetContributionOutputStageKernel
Port NEGEMMLowpOffsetContributionKernel to CpuGemmLowpOffsetContributionKernel

Resolves: COMPMID-4403

Change-Id: I3227f052f25e7b41d073bbea1da8a881fcd78b8e
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5875
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
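
Note for reviewers: the sketch below illustrates the run-time pattern these ports move to. A kernel is configured on ITensorInfo descriptors only, and its ITensor operands are supplied at schedule time through an ITensorPack. This is an illustrative example, not part of the patch; the function and tensor names are placeholders, while the call pattern mirrors the NEQLSTMLayer and NEGEMMLowpMatrixMultiplyCore changes below.

    // Illustrative only: ITensorInfo-based configure, ITensorPack-based run,
    // as used by the ported CpuGemmLowp* kernels in this patch.
    #include <memory>

    #include "arm_compute/core/ITensorPack.h"
    #include "arm_compute/core/KernelDescriptors.h"
    #include "arm_compute/runtime/NEON/NEScheduler.h"
    #include "arm_compute/runtime/Tensor.h"
    #include "src/core/cpu/kernels/CpuGemmLowpMatrixReductionKernel.h"

    using namespace arm_compute;

    // Placeholder helper: reduces the rows of a quantized weights matrix into an
    // effective-bias tensor, the same way NEQLSTMLayer::prepare() now does.
    void run_matrix_a_reduction(const ITensor *weights, Tensor &eff_bias, int32_t num_units, int32_t offset)
    {
        // Configure on tensor descriptors only; no ITensor is bound at configure time
        auto reduction = std::make_unique<cpu::kernels::CpuGemmLowpMatrixAReductionKernel>();
        reduction->configure(weights->info(), eff_bias.info(),
                             GEMMLowpReductionKernelInfo(num_units, false, offset, true));

        // Operands are passed at run time through an ITensorPack
        ITensorPack pack =
        {
            { TensorType::ACL_SRC, weights },
            { TensorType::ACL_DST, &eff_bias }
        };
        NEScheduler::get().schedule_op(reduction.get(), Window::DimY, reduction->window(), pack);
    }

The same pack-based contract applies one level up: NEGEMMLowpMatrixMultiplyCore now only builds run/prepare packs and forwards them to cpu::CpuGemmLowpMatrixMultiplyCore::run() and prepare(), as shown in the first hunk below.
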
diff --git a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
index 0aba3c0..641a2c2 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
@@ -23,660 +23,104 @@
  */
 #include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
 
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
 #include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/KernelDescriptors.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Types.h"
 #include "arm_compute/core/Validate.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/IWeightsManager.h"
+#include "arm_compute/runtime/MemoryGroup.h"
 #include "arm_compute/runtime/NEON/NEScheduler.h"
 #include "arm_compute/runtime/Tensor.h"
-#include "arm_compute/runtime/TensorAllocator.h"
-#include "src/core/helpers/AutoConfiguration.h"
 #include "src/core/helpers/MemoryHelpers.h"
 
-#include "arm_compute/core/ITensorPack.h"
-#include "arm_compute/runtime/MemoryGroup.h"
-#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
-#include "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
-#include "src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h"
-#include "src/core/cpu/kernels/CpuGemmTranspose1xWKernel.h"
-#include "src/runtime/cpu/operators/internal/CpuGemmAssemblyDispatch.h"
+#include "src/runtime/cpu/operators/CpuGemmLowpMatrixMultiplyCore.h"
+
+using namespace arm_compute::experimental;
 
 namespace arm_compute
 {
-namespace
-{
-cpu::AsmGemmInfo init_assembly_metadata(const GEMMInfo &info)
-{
-    cpu::AsmGemmInfo asm_info;
-    asm_info.method                  = cpu::AsmConvMethod::Im2Col;
-    asm_info.reinterpret_input_as_3d = info.reinterpret_input_as_3d();
-    asm_info.depth_output_gemm3d     = info.depth_output_gemm3d();
-    asm_info.activation_info         = info.activation_info();
-    asm_info.output_stage            = info.gemmlowp_output_stage();
-
-    return asm_info;
-}
-} // namespace
-
 struct NEGEMMLowpMatrixMultiplyCore::Impl
 {
-    MemoryGroup                                                    memory_group{};
-    IWeightsManager                                               *weights_manager{ nullptr };
-    std::unique_ptr<cpu::CpuGemmAssemblyDispatch>                  asm_glue{ nullptr };
-    std::unique_ptr<NEGEMMLowpMatrixMultiplyKernel>                mm_kernel{ nullptr };
-    std::unique_ptr<cpu::kernels::CpuGemmInterleave4x4Kernel>      mtx_a_reshape_kernel{ nullptr };
-    std::unique_ptr<cpu::kernels::CpuGemmTranspose1xWKernel>       mtx_b_reshape_kernel{ nullptr };
-    std::unique_ptr<NEGEMMLowpMatrixAReductionKernel>              mtx_a_reduction_kernel{ nullptr };
-    std::unique_ptr<NEGEMMLowpMatrixBReductionKernel>              mtx_b_reduction_kernel{ nullptr };
-    std::unique_ptr<NEGEMMLowpOffsetContributionKernel>            offset_contribution_kernel{ nullptr };
-    std::unique_ptr<NEGEMMLowpOffsetContributionOutputStageKernel> offset_contribution_output_stage_kernel{ nullptr };
-    std::unique_ptr<NEActivationLayer>                             activation_func{ nullptr };
-    std::unique_ptr<NEConvertQuantizedSignednessKernel>            convert_to_signed_asymm{ nullptr };
-    std::unique_ptr<NEConvertQuantizedSignednessKernel>            convert_from_signed_asymm{ nullptr };
-
-    const ITensor *a_to_use{ nullptr };
-    Tensor         vector_sum_col{};
-    Tensor         vector_sum_row{};
-    Tensor         tmp_a{};
-    Tensor         tmp_b{};
-    Tensor         mm_result_s32{};
-    Tensor         signed_a{};
-    Tensor         signed_output{};
-    const ITensor *original_b{ nullptr };
-    int32_t        a_offset{ 0 };
-    int32_t        b_offset{ 0 };
-
-    bool run_vector_matrix_multiplication{ false };
-    bool assembly_path{ false };
-    bool fused_assembly_path{ false };
-    bool reshape_b_only_on_first_run{ false };
-    bool is_prepared{ false };
-    bool fuse_output_stage{ false };
-    bool run_activation{ false };
-    bool flip_signedness{ false };
-
-    experimental::MemoryRequirements aux_mem_req{};
-    ITensorPack                      asm_glue_run_pack{};
-    ITensorPack                      asm_glue_prep_pack{};
-    WorkspaceData<Tensor>            asm_glue_workspace{};
+    const ITensor                                      *b{ nullptr };
+    std::unique_ptr<cpu::CpuGemmLowpMatrixMultiplyCore> op{ nullptr };
+    ITensorPack                                         run_pack{};
+    ITensorPack                                         prep_pack{};
+    MemoryGroup                                         memory_group{};
+    IWeightsManager                                    *weights_manager{ nullptr };
+    MemoryRequirements                                  aux_mem_req{};
+    WorkspaceData<Tensor>                               workspace_tensors{};
+    bool                                                is_prepared{ false };
 };
 
-using namespace arm_compute::experimental;
-using namespace arm_compute::misc::shape_calculator;
-
-NEGEMMLowpMatrixMultiplyCore::~NEGEMMLowpMatrixMultiplyCore() = default;
-
 NEGEMMLowpMatrixMultiplyCore::NEGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
-    : _impl(std::make_unique<struct NEGEMMLowpMatrixMultiplyCore::Impl>())
+    : _impl(std::make_unique<Impl>())
 {
-    _impl->memory_group    = MemoryGroup(memory_manager);
     _impl->weights_manager = weights_manager;
+    _impl->memory_group    = MemoryGroup(memory_manager);
 }
+NEGEMMLowpMatrixMultiplyCore::~NEGEMMLowpMatrixMultiplyCore() = default;
 
 void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *output, const GEMMInfo &gemm_info)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, output);
-    ARM_COMPUTE_UNUSED(c);
-    ARM_COMPUTE_ERROR_THROW_ON(NEGEMMLowpMatrixMultiplyCore::validate(a->info(), b->info(), c != nullptr ? c->info() : nullptr, output->info(), gemm_info));
-
-    const ITensor *matrix_a = a;
-    const ITensor *matrix_b = b;
-    GEMMInfo       info     = gemm_info;
-
-    // Set internal variables
-    _impl->a_offset                         = a->info()->quantization_info().uniform().offset;
-    _impl->b_offset                         = b->info()->quantization_info().uniform().offset;
-    _impl->run_vector_matrix_multiplication = a->info()->dimension(1) < 2;
-    _impl->reshape_b_only_on_first_run      = info.reshape_b_only_on_first_run();
-    _impl->is_prepared                      = false;
-    _impl->fused_assembly_path              = false;
-    _impl->flip_signedness                  = is_data_type_quantized_per_channel(b->info()->data_type()) && (a->info()->data_type() == DataType::QASYMM8) && _impl->reshape_b_only_on_first_run;
-    _impl->original_b                       = b;
-
-    _impl->asm_glue = std::make_unique<cpu::CpuGemmAssemblyDispatch>();
-
-    _impl->a_to_use = a;
-
-    // Convert to QASYMM8 -> QASYMM8_SIGNED and back
-    if(_impl->flip_signedness)
+    _impl->b  = b;
+    _impl->op = std::make_unique<cpu::CpuGemmLowpMatrixMultiplyCore>();
+    _impl->op->configure(a->info(), b->info(), (c != nullptr ? c->info() : nullptr), output->info(), gemm_info);
+    _impl->run_pack =
     {
-        const int32_t                 offset_correction = 128;
-        const DataType                dt                = DataType::QASYMM8_SIGNED;
-        const UniformQuantizationInfo iqinfo            = _impl->a_to_use->info()->quantization_info().uniform();
-
-        _impl->signed_a.allocator()->init(_impl->a_to_use->info()->clone()->set_data_type(dt).set_quantization_info(QuantizationInfo(iqinfo.scale, iqinfo.offset + offset_correction)));
-        _impl->memory_group.manage(&_impl->signed_a);
-        _impl->convert_to_signed_asymm = std::make_unique<NEConvertQuantizedSignednessKernel>();
-        _impl->convert_to_signed_asymm->configure(_impl->a_to_use, &_impl->signed_a);
-        _impl->a_to_use = &_impl->signed_a;
-        _impl->a_offset = _impl->signed_a.info()->quantization_info().uniform().offset;
-
-        const UniformQuantizationInfo oqinfo = output->info()->quantization_info().uniform();
-        _impl->memory_group.manage(&_impl->signed_output);
-        _impl->signed_output.allocator()->init(output->info()->clone()->set_data_type(dt).set_quantization_info(QuantizationInfo(oqinfo.scale, oqinfo.offset - offset_correction)));
-
-        // Output stage correction
-        GEMMLowpOutputStageInfo output_stage_corr = info.gemmlowp_output_stage();
-        output_stage_corr.gemmlowp_offset         = _impl->signed_output.info()->quantization_info().uniform().offset;
-        output_stage_corr.gemmlowp_min_bound -= offset_correction;
-        output_stage_corr.gemmlowp_max_bound -= offset_correction;
-        info.set_gemmlowp_output_stage(output_stage_corr);
-
-        // Update matrix a
-        matrix_a = &_impl->signed_a;
-    }
-
-    // If GEMMLowpOutputStage != NONE, fuse the offset contribution with the output stage
-    if(info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE)
+        { TensorType::ACL_SRC_0, a },
+        { TensorType::ACL_SRC_1, b },
+        { TensorType::ACL_SRC_2, c },
+        { TensorType::ACL_DST, output }
+    };
+    _impl->prep_pack =
     {
-        _impl->fuse_output_stage = true;
-        _impl->memory_group.manage(&_impl->mm_result_s32);
-        TensorInfo info_mm_result_s32(output->info()->tensor_shape(), 1, DataType::S32);
-        _impl->mm_result_s32.allocator()->init(info_mm_result_s32);
-    }
-
-    // Initialize assembly kernel meta-data
-    const cpu::AsmGemmInfo asm_info = init_assembly_metadata(gemm_info);
-#ifdef __aarch64__
-    switch(a->info()->data_type())
-    {
-        case DataType::QASYMM8:
-        case DataType::QASYMM8_SIGNED:
-        case DataType::U8:
-        case DataType::S8:
-        {
-            if(is_data_type_quantized_asymmetric(_impl->a_to_use->info()->data_type()) && info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
-            {
-                auto c_info_to_use = c == nullptr ? nullptr : c->info();
-                _impl->asm_glue->configure(_impl->a_to_use->info(), b->info(), c_info_to_use, output->info(), asm_info);
-                _impl->fused_assembly_path = _impl->asm_glue->is_configured();
-                _impl->asm_glue_run_pack.add_const_tensor(TensorType::ACL_SRC_2, c);
-                _impl->asm_glue_run_pack.add_tensor(TensorType::ACL_DST, output);
-            }
-            else
-            {
-                auto output_to_use = (_impl->fuse_output_stage ? &_impl->mm_result_s32 : output);
-                _impl->asm_glue->configure(_impl->a_to_use->info(), b->info(), nullptr, output_to_use->info(), asm_info);
-                _impl->asm_glue_run_pack.add_tensor(TensorType::ACL_DST, output_to_use);
-            }
-            _impl->assembly_path = _impl->asm_glue->is_configured();
-
-            if(_impl->assembly_path)
-            {
-                _impl->asm_glue_run_pack.add_const_tensor(TensorType::ACL_SRC_0, _impl->a_to_use);
-
-                _impl->aux_mem_req        = _impl->asm_glue->workspace();
-                _impl->asm_glue_prep_pack = { { TensorType::ACL_SRC_1, b }, { TensorType::ACL_SRC_2, c } };
-
-                _impl->asm_glue_workspace = manage_workspace<Tensor>(_impl->aux_mem_req, _impl->memory_group, _impl->asm_glue_run_pack, _impl->asm_glue_prep_pack);
-            }
-            break;
-        }
-        default:
-        {
-            ARM_COMPUTE_ERROR("Datatype not supported");
-            break;
-        }
-    }
-#endif /* __aarch64__ */
-    if(!(_impl->assembly_path || _impl->run_vector_matrix_multiplication))
-    {
-        matrix_a = &_impl->tmp_a;
-        matrix_b = &_impl->tmp_b;
-
-        // The interleaved output matrix will have the following shape: [ a_height * 4, ceil(a_width / 4.0f) ]
-        TensorInfo a_info(compute_interleaved_shape(*_impl->a_to_use->info()), 1, _impl->a_to_use->info()->data_type(), _impl->a_to_use->info()->quantization_info());
-        // The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width / 16.0f) ]
-        TensorInfo b_info(compute_transpose1xW_shape(*b->info()), 1, b->info()->data_type(), b->info()->quantization_info());
-        _impl->tmp_a.allocator()->init(a_info);
-        _impl->tmp_b.allocator()->init(b_info);
-        _impl->memory_group.manage(&_impl->tmp_a);
-        if(!_impl->reshape_b_only_on_first_run)
-        {
-            _impl->memory_group.manage(&_impl->tmp_b);
-        }
-
-        // Configure interleave kernel
-        _impl->mtx_a_reshape_kernel = std::make_unique<cpu::kernels::CpuGemmInterleave4x4Kernel>();
-        _impl->mtx_a_reshape_kernel->configure(_impl->a_to_use->info(), _impl->tmp_a.info());
-
-        // Configure transpose kernel
-        _impl->mtx_b_reshape_kernel = std::make_unique<cpu::kernels::CpuGemmTranspose1xWKernel>();
-        _impl->mtx_b_reshape_kernel->configure(b->info(), _impl->tmp_b.info());
-    }
-
-    if(!_impl->fused_assembly_path)
-    {
-        // Build reduction info
-        const GEMMLowpReductionKernelInfo reduction_info(_impl->a_to_use->info()->dimension(0), false, 0, false);
-
-        // Initialize matrix B reduction kernel only if _impl->a_offset is not equal to 0
-        if(_impl->a_offset != 0)
-        {
-            TensorInfo info_vector_sum_col(compute_reductionA_shape(*b->info()), 1, DataType::S32);
-
-            _impl->vector_sum_col.allocator()->init(info_vector_sum_col);
-            if(!_impl->reshape_b_only_on_first_run)
-            {
-                _impl->memory_group.manage(&_impl->vector_sum_col);
-            }
-
-            // Configure Matrix B reduction kernel
-            _impl->mtx_b_reduction_kernel = std::make_unique<NEGEMMLowpMatrixBReductionKernel>();
-            _impl->mtx_b_reduction_kernel->configure(b, &_impl->vector_sum_col, reduction_info);
-        }
-
-        // Initialize Matrix A reduction kernel only if _impl->b_offset is not equal to 0
-        if(_impl->b_offset != 0)
-        {
-            TensorInfo info_vector_sum_row(compute_reductionB_shape(*_impl->a_to_use->info()), 1, DataType::S32);
-
-            _impl->vector_sum_row.allocator()->init(info_vector_sum_row);
-            _impl->memory_group.manage(&_impl->vector_sum_row);
-
-            // Configure matrix A reduction kernel
-            _impl->mtx_a_reduction_kernel = std::make_unique<NEGEMMLowpMatrixAReductionKernel>();
-            _impl->mtx_a_reduction_kernel->configure(_impl->a_to_use, &_impl->vector_sum_row, reduction_info);
-        }
-
-        if(_impl->fuse_output_stage)
-        {
-            // Configure matrix multiply kernel
-            if(!_impl->assembly_path)
-            {
-                _impl->mm_kernel = std::make_unique<NEGEMMLowpMatrixMultiplyKernel>();
-                _impl->mm_kernel->configure(matrix_a, matrix_b, &_impl->mm_result_s32);
-            }
-
-            _impl->offset_contribution_output_stage_kernel = std::make_unique<NEGEMMLowpOffsetContributionOutputStageKernel>();
-            _impl->offset_contribution_output_stage_kernel->configure(&_impl->mm_result_s32,
-                                                                      _impl->a_offset == 0 ? nullptr : &_impl->vector_sum_col,
-                                                                      _impl->b_offset == 0 ? nullptr : &_impl->vector_sum_row, c,
-                                                                      _impl->flip_signedness ? &_impl->signed_output : output,
-                                                                      a->info()->dimension(0),
-                                                                      _impl->a_offset, _impl->b_offset, info.gemmlowp_output_stage());
-
-            if(_impl->flip_signedness)
-            {
-                _impl->convert_from_signed_asymm = std::make_unique<NEConvertQuantizedSignednessKernel>();
-                _impl->convert_from_signed_asymm->configure(&_impl->signed_output, output);
-            }
-        }
-        else
-        {
-            // Configure matrix multiply kernel
-            if(!_impl->assembly_path)
-            {
-                _impl->mm_kernel = std::make_unique<NEGEMMLowpMatrixMultiplyKernel>();
-                _impl->mm_kernel->configure(matrix_a, matrix_b, output);
-            }
-            // Configure offset contribution kernel
-            _impl->offset_contribution_kernel = std::make_unique<NEGEMMLowpOffsetContributionKernel>();
-            _impl->offset_contribution_kernel->configure(output, _impl->a_offset == 0 ? nullptr : &_impl->vector_sum_col, _impl->b_offset == 0 ? nullptr : &_impl->vector_sum_row,
-                                                         _impl->a_to_use->info()->dimension(0),
-                                                         _impl->a_offset, _impl->b_offset);
-        }
-    }
-    // Configure activation
-    const ActivationLayerInfo &activation = gemm_info.activation_info();
-    _impl->run_activation                 = activation.enabled() && (!_impl->assembly_path || !cpu::CpuGemmAssemblyDispatch::is_activation_supported(activation));
-    if(_impl->run_activation)
-    {
-        _impl->activation_func = std::make_unique<NEActivationLayer>();
-        _impl->activation_func->configure(output, nullptr, activation);
-    }
-
-    // Allocate tensors
-    if(!_impl->assembly_path && !_impl->run_vector_matrix_multiplication)
-    {
-        _impl->tmp_a.allocator()->allocate();
-        if(!_impl->reshape_b_only_on_first_run)
-        {
-            _impl->tmp_b.allocator()->allocate();
-        }
-    }
-
-    if(!_impl->fused_assembly_path)
-    {
-        if(_impl->a_offset != 0 && !_impl->reshape_b_only_on_first_run)
-        {
-            _impl->vector_sum_col.allocator()->allocate();
-        }
-
-        if(_impl->b_offset != 0)
-        {
-            _impl->vector_sum_row.allocator()->allocate();
-        }
-    }
-
-    if(_impl->fuse_output_stage)
-    {
-        _impl->mm_result_s32.allocator()->allocate();
-    }
-
-    if(_impl->flip_signedness)
-    {
-        _impl->signed_a.allocator()->allocate();
-        _impl->signed_output.allocator()->allocate();
-    }
+        { TensorType::ACL_SRC_1, b },
+        { TensorType::ACL_SRC_2, c }
+    };
+    _impl->aux_mem_req       = _impl->op->workspace();
+    _impl->workspace_tensors = manage_workspace<Tensor>(_impl->aux_mem_req, _impl->memory_group, _impl->run_pack, _impl->prep_pack);
 }
 
 Status NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, const GEMMInfo &gemm_info)
 {
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(b, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8, DataType::QSYMM8_PER_CHANNEL);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(c != nullptr && gemm_info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::NONE, "Bias addition not supported in NEGEMMLowpMatrixMultiplyCore for output S32");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG((a)->dimension(0) != (b)->dimension(1),
-                                    "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported");
-
-    GEMMInfo           info          = gemm_info;
-    const ITensorInfo *matrix_a_info = a;
-    const ITensorInfo *matrix_b_info = b;
-
-    const ITensorInfo *a_to_use = a;
-
-    TensorInfo tmp_a_info{};
-    TensorInfo tmp_b_info{};
-    TensorInfo mm_result_s32_info{};
-
-    int32_t a_offset = a->quantization_info().uniform().offset;
-    int32_t b_offset = b->quantization_info().uniform().offset;
-
-    bool fuse_output_stage = info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE;
-    if(fuse_output_stage)
-    {
-        auto_init_if_empty(mm_result_s32_info, a->clone()->set_tensor_shape(output->tensor_shape()).set_data_type(DataType::S32));
-    }
-
-    // Convert QASYMM8->QASYMM8_SIGNED
-    TensorInfo signed_a{};
-    TensorInfo signed_output{};
-    bool       flip_signedness = is_data_type_quantized_per_channel(b->data_type()) && (a->data_type() == DataType::QASYMM8) && info.reshape_b_only_on_first_run();
-    if(flip_signedness)
-    {
-        const int32_t                 offset_correction = 128;
-        const DataType                dt                = DataType::QASYMM8_SIGNED;
-        const UniformQuantizationInfo iqinfo            = a_to_use->quantization_info().uniform();
-
-        signed_a = a_to_use->clone()->set_data_type(dt).set_quantization_info(QuantizationInfo(iqinfo.scale, iqinfo.offset + offset_correction));
-        ARM_COMPUTE_RETURN_ON_ERROR(NEConvertQuantizedSignednessKernel::validate(a_to_use, &signed_a));
-        a_to_use = &signed_a;
-        a_offset = signed_a.quantization_info().uniform().offset;
-
-        const UniformQuantizationInfo oqinfo = output->quantization_info().uniform();
-        signed_output                        = output->clone()->set_data_type(dt).set_quantization_info(QuantizationInfo(oqinfo.scale, oqinfo.offset - offset_correction));
-
-        // Output stage correction
-        GEMMLowpOutputStageInfo output_stage_corr = info.gemmlowp_output_stage();
-        output_stage_corr.gemmlowp_offset         = signed_output.quantization_info().uniform().offset;
-        output_stage_corr.gemmlowp_min_bound -= offset_correction;
-        output_stage_corr.gemmlowp_max_bound -= offset_correction;
-        info.set_gemmlowp_output_stage(output_stage_corr);
-
-        // Update matrix a
-        matrix_a_info = &signed_a;
-    }
-
-    // Initialize assembly kernel meta-data
-    const cpu::AsmGemmInfo asm_info = init_assembly_metadata(info);
-
-    // Check if we need to run the optimized assembly kernel
-    bool run_optimised             = false;
-    bool run_optimised_requantized = false;
-    if(is_data_type_quantized_asymmetric(a_to_use->data_type()) && info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
-    {
-        run_optimised             = bool(cpu::CpuGemmAssemblyDispatch::validate(a_to_use, b, c, output, asm_info));
-        run_optimised_requantized = run_optimised;
-    }
-    else
-    {
-        run_optimised = bool(cpu::CpuGemmAssemblyDispatch::validate(a_to_use, b, nullptr, fuse_output_stage ? &mm_result_s32_info : output, asm_info));
-    }
-
-    if(run_optimised)
-    {
-        ARM_COMPUTE_RETURN_ERROR_ON(b->dimension(0) != output->dimension(0));
-        if(info.depth_output_gemm3d() != 0)
-        {
-            if(info.reinterpret_input_as_3d())
-            {
-                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1));
-                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(2) != output->dimension(2));
-            }
-            else
-            {
-                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1) * output->dimension(2));
-            }
-        }
-        else
-        {
-            ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1));
-        }
-    }
-    else
-    {
-        ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.reinterpret_input_as_3d(), "NEGEMM cannot reinterpret the input tensor as 3D");
-        ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.depth_output_gemm3d() != 0, "NEGEMM cannot reinterpret the output tensor as 3D");
-
-        const bool run_vector_matrix_multiplication = a->dimension(1) < 2;
-        if(!run_vector_matrix_multiplication)
-        {
-            matrix_a_info = &tmp_a_info;
-            matrix_b_info = &tmp_b_info;
-
-            // The interleaved output matrix will have the following shape: [ a_height * 4, ceil(a_width / 4.0f) ]
-            TensorShape shape_tmp_a = a->tensor_shape();
-            shape_tmp_a.set(0, a->dimension(0) * 4);
-            shape_tmp_a.set(1, std::ceil(a->dimension(1) / 4.f));
-
-            // The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width / 16.0f) ]
-            TensorShape shape_tmp_b = b->tensor_shape();
-            shape_tmp_b.set(0, b->dimension(1) * 16);
-            shape_tmp_b.set(1, std::ceil(b->dimension(0) / 16.f));
-
-            // Validate interleave kernel
-            auto_init_if_empty(tmp_a_info, a_to_use->clone()->set_tensor_shape(shape_tmp_a));
-            auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(shape_tmp_b));
-
-            ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmInterleave4x4Kernel::validate(a_to_use, &tmp_a_info));
-            ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmTranspose1xWKernel::validate(b, &tmp_b_info));
-        }
-    }
-
-    if(!run_optimised_requantized)
-    {
-        TensorInfo info_vector_sum_col{};
-        TensorInfo info_vector_sum_row{};
-
-        const GEMMLowpReductionKernelInfo reduction_info(a_to_use->dimension(0), false, 0, false);
-
-        // Validate matrix B reduction kernel only if _a_offset is not equal to 0
-        if(a_offset != 0)
-        {
-            info_vector_sum_col = TensorInfo(compute_reductionA_shape(*b), 1, DataType::S32);
-
-            // Configure Matrix B reduction kernel
-            ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixBReductionKernel::validate(b, &info_vector_sum_col, reduction_info));
-        }
-
-        // Validate Matrix A reduction kernel only if _b_offset is not equal to 0
-        if(b_offset != 0)
-        {
-            info_vector_sum_row = TensorInfo(compute_reductionB_shape(*a), 1, DataType::S32);
-
-            // Configure matrix A reduction kernel
-            ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixAReductionKernel::validate(a_to_use, &info_vector_sum_row, reduction_info));
-        }
-
-        if(fuse_output_stage)
-        {
-            if(!run_optimised)
-            {
-                ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.reinterpret_input_as_3d(), "NEGEMMLowpMatrixMultiplyKernel cannot reinterpret the input tensor as 3D");
-                ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.depth_output_gemm3d() != 0, "NEGEMMLowpMatrixMultiplyKernel cannot reinterpret the output tensor as 3D");
-
-                ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, &mm_result_s32_info));
-            }
-
-            // Validate offset contribution kernel
-            ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpOffsetContributionOutputStageKernel::validate(&mm_result_s32_info,
-                                                                                                a_offset == 0 ? nullptr : &info_vector_sum_col,
-                                                                                                b_offset == 0 ? nullptr : &info_vector_sum_row,
-                                                                                                c,
-                                                                                                flip_signedness ? &signed_output : output,
-                                                                                                a_offset, b_offset,
-                                                                                                info.gemmlowp_output_stage()));
-        }
-        else
-        {
-            if(!run_optimised)
-            {
-                ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.reinterpret_input_as_3d(), "NEGEMMLowpMatrixMultiplyKernel cannot reinterpret the input tensor as 3D");
-                ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.depth_output_gemm3d() != 0, "NEGEMMLowpMatrixMultiplyKernel cannot reinterpret the output tensor as 3D");
-
-                ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, output));
-            }
-            // Validate offset contribution kernel
-            ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpOffsetContributionKernel::validate(output,
-                                                                                     a_offset == 0 ? nullptr : &info_vector_sum_col,
-                                                                                     b_offset == 0 ? nullptr : &info_vector_sum_row,
-                                                                                     a_offset, b_offset));
-        }
-    }
-
-    // Validate activation
-    const ActivationLayerInfo &activation = gemm_info.activation_info();
-    if(activation.enabled())
-    {
-        ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, activation));
-    }
-
-    return Status{};
+    return cpu::CpuGemmLowpMatrixMultiplyCore::validate(a, b, c, output, gemm_info);
 }
 
 void NEGEMMLowpMatrixMultiplyCore::run()
 {
     prepare();
-
     MemoryGroupResourceScope scope_mg(_impl->memory_group);
-
-    // Convert QASYMM8->QASYMM8_SIGNED
-    if(_impl->flip_signedness)
-    {
-        NEScheduler::get().schedule(_impl->convert_to_signed_asymm.get(), Window::DimY);
-    }
-
-    // Run GEMM
-    if(_impl->asm_glue->is_configured())
-    {
-        _impl->asm_glue->run(_impl->asm_glue_run_pack);
-    }
-    else
-    {
-        if(!_impl->run_vector_matrix_multiplication)
-        {
-            // Run interleave kernel
-            ITensorPack interleave_pack{ { ACL_SRC, _impl->a_to_use }, { ACL_DST, &_impl->tmp_a } };
-            NEScheduler::get().schedule_op(_impl->mtx_a_reshape_kernel.get(), Window::DimY, _impl->mtx_a_reshape_kernel->window(), interleave_pack);
-
-            if(!_impl->reshape_b_only_on_first_run)
-            {
-                // Run transpose kernel
-                ITensorPack reshape_b_pack{ { ACL_SRC, _impl->original_b }, { ACL_DST, &_impl->tmp_b } };
-                NEScheduler::get().schedule_op(_impl->mtx_b_reshape_kernel.get(), Window::DimY, _impl->mtx_b_reshape_kernel->window(), reshape_b_pack);
-            }
-        }
-        NEScheduler::get().schedule(_impl->mm_kernel.get(), Window::DimY);
-    }
-
-    if(!_impl->fused_assembly_path)
-    {
-        // Run matrix A reduction kernel only if _impl->b_offset is not equal to 0
-        if(_impl->b_offset != 0)
-        {
-            NEScheduler::get().schedule(_impl->mtx_a_reduction_kernel.get(), Window::DimX);
-        }
-
-        // Run matrix B reduction kernel only if _impl->a_offset is not equal to 0
-        if(_impl->a_offset != 0 && !_impl->reshape_b_only_on_first_run)
-        {
-            NEScheduler::get().schedule(_impl->mtx_b_reduction_kernel.get(), Window::DimX);
-        }
-
-        if(_impl->fuse_output_stage)
-        {
-            // Run offset contribution kernel
-            NEScheduler::get().schedule(_impl->offset_contribution_output_stage_kernel.get(), Window::DimY);
-        }
-        else
-        {
-            // Run offset contribution kernel
-            NEScheduler::get().schedule(_impl->offset_contribution_kernel.get(), Window::DimY);
-        }
-    }
-
-    // Convert QASYMM8_SIGNED->QASYMM8
-    if(!_impl->fused_assembly_path && _impl->fuse_output_stage && _impl->flip_signedness)
-    {
-        NEScheduler::get().schedule(_impl->convert_from_signed_asymm.get(), Window::DimY);
-    }
-
-    // Run fused activation unless already run in the fused assembly
-    if(_impl->run_activation)
-    {
-        _impl->activation_func->run();
-    }
+    _impl->op->run(_impl->run_pack);
 }
 
 void NEGEMMLowpMatrixMultiplyCore::prepare()
 {
     if(!_impl->is_prepared)
     {
-        // Run assembly reshape
-        if(_impl->asm_glue->is_configured())
+        _impl->op->prepare(_impl->prep_pack);
+
+        auto has_reshape = std::find_if(_impl->aux_mem_req.begin(),
+                                        _impl->aux_mem_req.end(),
+                                        [](const MemoryInfo & m) -> bool { return m.lifetime == MemoryLifetime::Persistent; });
+
+        if(has_reshape != std::end(_impl->aux_mem_req))
         {
-            _impl->asm_glue->prepare(_impl->asm_glue_prep_pack);
+            _impl->b->mark_as_unused();
+        }
 
-            auto has_reshape = std::find_if(_impl->aux_mem_req.begin(),
-                                            _impl->aux_mem_req.end(),
-                                            [](const MemoryInfo & m) -> bool { return m.lifetime == MemoryLifetime::Persistent; });
-
-            if(has_reshape != std::end(_impl->aux_mem_req))
+        // Release temporary tensors that are only used in prepare stage
+        for(auto &ws : _impl->workspace_tensors)
+        {
+            const int slot = ws.first;
+            for(auto &m : _impl->aux_mem_req)
             {
-                _impl->original_b->mark_as_unused();
-            }
-            else
-            {
-                _impl->asm_glue_run_pack.add_const_tensor(ACL_SRC_1, _impl->original_b);
+                if(m.slot == slot && m.lifetime == MemoryLifetime::Prepare)
+                {
+                    auto tensor = ws.second.get();
+                    tensor->allocator()->free();
+                    break;
+                }
             }
         }
-        // Run non-assembly reshape
-        else if(_impl->reshape_b_only_on_first_run && !_impl->run_vector_matrix_multiplication && !_impl->asm_glue->is_configured())
-        {
-            // Run reshape kernel and mark original weights tensor as unused
-            _impl->tmp_b.allocator()->allocate();
-            ITensorPack reshape_b_pack{ { ACL_SRC, _impl->original_b }, { ACL_DST, &_impl->tmp_b } };
-            NEScheduler::get().schedule_op(_impl->mtx_b_reshape_kernel.get(), Window::DimY, _impl->mtx_b_reshape_kernel->window(), reshape_b_pack);
-        }
-
-        // Run matrix B reduction kernel only if _impl->a_offset is not equal to 0
-        if(!_impl->fused_assembly_path && _impl->a_offset != 0 && _impl->reshape_b_only_on_first_run)
-        {
-            _impl->vector_sum_col.allocator()->allocate();
-            NEScheduler::get().schedule(_impl->mtx_b_reduction_kernel.get(), Window::DimX);
-        }
-
         _impl->is_prepared = true;
     }
 }
diff --git a/src/runtime/NEON/functions/NEQLSTMLayer.cpp b/src/runtime/NEON/functions/NEQLSTMLayer.cpp
index f3a3d23..946791a 100644
--- a/src/runtime/NEON/functions/NEQLSTMLayer.cpp
+++ b/src/runtime/NEON/functions/NEQLSTMLayer.cpp
@@ -23,6 +23,7 @@
  */
 #include "arm_compute/runtime/NEON/functions/NEQLSTMLayer.h"
 
+#include "arm_compute/core/ITensorPack.h"
 #include "arm_compute/core/KernelDescriptors.h"
 #include "arm_compute/core/QuantizationInfo.h"
 #include "arm_compute/core/Utils.h"
@@ -30,12 +31,8 @@
 #include "arm_compute/core/utils/misc/InfoHelpers.h"
 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
 #include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
 #include "src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.h"
+#include "src/core/cpu/kernels/CpuGemmLowpMatrixReductionKernel.h"
 #include "src/core/helpers/WindowHelpers.h"
 
 namespace arm_compute
@@ -223,29 +220,29 @@
         _input_to_input_weights     = lstm_params.input_to_input_weights();
         _recurrent_to_input_weights = lstm_params.recurrent_to_input_weights();
 
-        _input_to_input_reduction     = std::make_unique<NEGEMMLowpMatrixAReductionKernel>();
-        _recurrent_to_input_reduction = std::make_unique<NEGEMMLowpMatrixAReductionKernel>();
-        _input_to_input_reduction->configure(_input_to_input_weights, &_input_to_input_eff_bias, GEMMLowpReductionKernelInfo(num_units, false, -qinput.offset, true));
-        _recurrent_to_input_reduction->configure(_recurrent_to_input_weights, &_recurrent_to_input_eff_bias, GEMMLowpReductionKernelInfo(num_units, false, -qoutput_state_in.offset, true));
+        _input_to_input_reduction     = std::make_unique<cpu::kernels::CpuGemmLowpMatrixAReductionKernel>();
+        _recurrent_to_input_reduction = std::make_unique<cpu::kernels::CpuGemmLowpMatrixAReductionKernel>();
+        _input_to_input_reduction->configure(_input_to_input_weights->info(), _input_to_input_eff_bias.info(), GEMMLowpReductionKernelInfo(num_units, false, -qinput.offset, true));
+        _recurrent_to_input_reduction->configure(_recurrent_to_input_weights->info(), _recurrent_to_input_eff_bias.info(), GEMMLowpReductionKernelInfo(num_units, false, -qoutput_state_in.offset, true));
     }
 
-    _input_to_forget_reduction     = std::make_unique<NEGEMMLowpMatrixAReductionKernel>();
-    _recurrent_to_forget_reduction = std::make_unique<NEGEMMLowpMatrixAReductionKernel>();
-    _input_to_cell_reduction       = std::make_unique<NEGEMMLowpMatrixAReductionKernel>();
-    _recurrent_to_cell_reduction   = std::make_unique<NEGEMMLowpMatrixAReductionKernel>();
-    _input_to_output_reduction     = std::make_unique<NEGEMMLowpMatrixAReductionKernel>();
-    _recurrent_to_output_reduction = std::make_unique<NEGEMMLowpMatrixAReductionKernel>();
+    _input_to_forget_reduction     = std::make_unique<cpu::kernels::CpuGemmLowpMatrixAReductionKernel>();
+    _recurrent_to_forget_reduction = std::make_unique<cpu::kernels::CpuGemmLowpMatrixAReductionKernel>();
+    _input_to_cell_reduction       = std::make_unique<cpu::kernels::CpuGemmLowpMatrixAReductionKernel>();
+    _recurrent_to_cell_reduction   = std::make_unique<cpu::kernels::CpuGemmLowpMatrixAReductionKernel>();
+    _input_to_output_reduction     = std::make_unique<cpu::kernels::CpuGemmLowpMatrixAReductionKernel>();
+    _recurrent_to_output_reduction = std::make_unique<cpu::kernels::CpuGemmLowpMatrixAReductionKernel>();
 
-    _recurrent_to_cell_reduction->configure(input_to_forget_weights, &_input_to_forget_eff_bias, GEMMLowpReductionKernelInfo(num_units, false, -qinput.offset, true));
-    _recurrent_to_forget_reduction->configure(recurrent_to_forget_weights, &_recurrent_to_forget_eff_bias, GEMMLowpReductionKernelInfo(num_units, false, -qoutput_state_in.offset, true));
-    _input_to_cell_reduction->configure(input_to_cell_weights, &_input_to_cell_eff_bias, GEMMLowpReductionKernelInfo(num_units, false, -qinput.offset, true));
-    _recurrent_to_cell_reduction->configure(recurrent_to_cell_weights, &_recurrent_to_cell_eff_bias, GEMMLowpReductionKernelInfo(num_units, false, -qoutput_state_in.offset, true));
-    _input_to_output_reduction->configure(input_to_output_weights, &_input_to_output_eff_bias, GEMMLowpReductionKernelInfo(num_units, false, -qinput.offset, true));
-    _recurrent_to_output_reduction->configure(recurrent_to_output_weights, &_recurrent_to_output_eff_bias, GEMMLowpReductionKernelInfo(num_units, false, -qoutput_state_in.offset, true));
+    _input_to_forget_reduction->configure(input_to_forget_weights->info(), _input_to_forget_eff_bias.info(), GEMMLowpReductionKernelInfo(num_units, false, -qinput.offset, true));
+    _recurrent_to_forget_reduction->configure(recurrent_to_forget_weights->info(), _recurrent_to_forget_eff_bias.info(), GEMMLowpReductionKernelInfo(num_units, false, -qoutput_state_in.offset, true));
+    _input_to_cell_reduction->configure(input_to_cell_weights->info(), _input_to_cell_eff_bias.info(), GEMMLowpReductionKernelInfo(num_units, false, -qinput.offset, true));
+    _recurrent_to_cell_reduction->configure(recurrent_to_cell_weights->info(), _recurrent_to_cell_eff_bias.info(), GEMMLowpReductionKernelInfo(num_units, false, -qoutput_state_in.offset, true));
+    _input_to_output_reduction->configure(input_to_output_weights->info(), _input_to_output_eff_bias.info(), GEMMLowpReductionKernelInfo(num_units, false, -qinput.offset, true));
+    _recurrent_to_output_reduction->configure(recurrent_to_output_weights->info(), _recurrent_to_output_eff_bias.info(), GEMMLowpReductionKernelInfo(num_units, false, -qoutput_state_in.offset, true));
     if(_has_projection)
     {
-        _projection_reduction = std::make_unique<NEGEMMLowpMatrixAReductionKernel>();
-        _projection_reduction->configure(_projection_weights, &_projection_eff_bias, GEMMLowpReductionKernelInfo(output_size, false, lstm_params.hidden_state_zero(), true));
+        _projection_reduction = std::make_unique<cpu::kernels::CpuGemmLowpMatrixAReductionKernel>();
+        _projection_reduction->configure(_projection_weights->info(), _projection_eff_bias.info(), GEMMLowpReductionKernelInfo(output_size, false, lstm_params.hidden_state_zero(), true));
         if(_projection_bias != nullptr)
         {
             _projection_bias_add.configure(_projection_bias, &_projection_eff_bias, &_projection_eff_bias, ConvertPolicy::SATURATE);
@@ -658,21 +655,26 @@
     const TensorInfo projection_eff_bias_info(TensorShape(output_size), 1, DataType::S32);
     if(!lstm_params.has_cifg_opt())
     {
-        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixAReductionKernel::validate(lstm_params.input_to_input_weights(), &eff_bias_info, GEMMLowpReductionKernelInfo(num_units, false, -qinput.offset, true)));
-        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixAReductionKernel::validate(lstm_params.recurrent_to_input_weights(), &eff_bias_info, GEMMLowpReductionKernelInfo(num_units, false, -qoutput_state_in.offset,
-                                                                               true)));
+        ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmLowpMatrixAReductionKernel::validate(lstm_params.input_to_input_weights(), &eff_bias_info, GEMMLowpReductionKernelInfo(num_units, false,
+                                                                                              -qinput.offset, true)));
+        ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmLowpMatrixAReductionKernel::validate(lstm_params.recurrent_to_input_weights(), &eff_bias_info, GEMMLowpReductionKernelInfo(num_units, false,
+                                                                                              -qoutput_state_in.offset,
+                                                                                              true)));
     }
-    ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixAReductionKernel::validate(input_to_forget_weights, &eff_bias_info, GEMMLowpReductionKernelInfo(num_units, false, -qinput.offset, true)));
-    ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixAReductionKernel::validate(recurrent_to_forget_weights, &eff_bias_info, GEMMLowpReductionKernelInfo(num_units, false, -qoutput_state_in.offset, true)));
-    ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixAReductionKernel::validate(input_to_cell_weights, &eff_bias_info, GEMMLowpReductionKernelInfo(num_units, false, -qinput.offset, true)));
-    ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixAReductionKernel::validate(recurrent_to_cell_weights, &eff_bias_info, GEMMLowpReductionKernelInfo(num_units, false, -qoutput_state_in.offset, true)));
-    ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixAReductionKernel::validate(input_to_output_weights, &eff_bias_info, GEMMLowpReductionKernelInfo(num_units, false, -qinput.offset, true)));
-    ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixAReductionKernel::validate(recurrent_to_output_weights, &eff_bias_info, GEMMLowpReductionKernelInfo(num_units, false, -qoutput_state_in.offset, true)));
+    ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmLowpMatrixAReductionKernel::validate(input_to_forget_weights, &eff_bias_info, GEMMLowpReductionKernelInfo(num_units, false, -qinput.offset, true)));
+    ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmLowpMatrixAReductionKernel::validate(recurrent_to_forget_weights, &eff_bias_info, GEMMLowpReductionKernelInfo(num_units, false,
+                                                                                          -qoutput_state_in.offset, true)));
+    ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmLowpMatrixAReductionKernel::validate(input_to_cell_weights, &eff_bias_info, GEMMLowpReductionKernelInfo(num_units, false, -qinput.offset, true)));
+    ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmLowpMatrixAReductionKernel::validate(recurrent_to_cell_weights, &eff_bias_info, GEMMLowpReductionKernelInfo(num_units, false, -qoutput_state_in.offset,
+                                                                                          true)));
+    ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmLowpMatrixAReductionKernel::validate(input_to_output_weights, &eff_bias_info, GEMMLowpReductionKernelInfo(num_units, false, -qinput.offset, true)));
+    ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmLowpMatrixAReductionKernel::validate(recurrent_to_output_weights, &eff_bias_info, GEMMLowpReductionKernelInfo(num_units, false,
+                                                                                          -qoutput_state_in.offset, true)));
     if(lstm_params.has_projection())
     {
-        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixAReductionKernel::validate(lstm_params.projection_weights(), &projection_eff_bias_info, GEMMLowpReductionKernelInfo(output_size, false,
-                                                                               lstm_params.hidden_state_zero(),
-                                                                               true)));
+        ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmLowpMatrixAReductionKernel::validate(lstm_params.projection_weights(), &projection_eff_bias_info, GEMMLowpReductionKernelInfo(output_size, false,
+                                                                                              lstm_params.hidden_state_zero(),
+                                                                                              true)));
         if(lstm_params.projection_bias() != nullptr)
         {
             ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lstm_params.projection_bias(), 1, DataType::S32);
@@ -1107,8 +1109,20 @@
         {
             _input_to_input_eff_bias.allocator()->allocate();
             _recurrent_to_input_eff_bias.allocator()->allocate();
-            NEScheduler::get().schedule(_input_to_input_reduction.get(), Window::DimY);
-            NEScheduler::get().schedule(_recurrent_to_input_reduction.get(), Window::DimY);
+
+            ITensorPack packII =
+            {
+                { TensorType::ACL_SRC, _input_to_input_weights },
+                { TensorType::ACL_DST, &_input_to_input_eff_bias }
+            };
+            NEScheduler::get().schedule_op(_input_to_input_reduction.get(), Window::DimY, _input_to_input_reduction->window(), packII);
+
+            ITensorPack packRI =
+            {
+                { TensorType::ACL_SRC, _recurrent_to_input_weights },
+                { TensorType::ACL_DST, &_recurrent_to_input_eff_bias }
+            };
+            NEScheduler::get().schedule_op(_recurrent_to_input_reduction.get(), Window::DimY, _recurrent_to_input_reduction->window(), packRI);
 
             _input_to_input_weights_transposed.allocator()->allocate();
             _recurrent_to_input_weights_transposed.allocator()->allocate();
@@ -1123,17 +1137,58 @@
         _recurrent_to_cell_eff_bias.allocator()->allocate();
         _input_to_output_eff_bias.allocator()->allocate();
         _recurrent_to_output_eff_bias.allocator()->allocate();
-        NEScheduler::get().schedule(_input_to_forget_reduction.get(), Window::DimY);
-        NEScheduler::get().schedule(_recurrent_to_forget_reduction.get(), Window::DimY);
-        NEScheduler::get().schedule(_input_to_cell_reduction.get(), Window::DimY);
-        NEScheduler::get().schedule(_recurrent_to_cell_reduction.get(), Window::DimY);
-        NEScheduler::get().schedule(_input_to_output_reduction.get(), Window::DimY);
-        NEScheduler::get().schedule(_recurrent_to_output_reduction.get(), Window::DimY);
+
+        ITensorPack packIF =
+        {
+            { TensorType::ACL_SRC, _input_to_forget_weights },
+            { TensorType::ACL_DST, &_input_to_forget_eff_bias }
+        };
+        NEScheduler::get().schedule_op(_input_to_forget_reduction.get(), Window::DimY, _input_to_forget_reduction->window(), packIF);
+
+        ITensorPack packRF =
+        {
+            { TensorType::ACL_SRC, _recurrent_to_forget_weights },
+            { TensorType::ACL_DST, &_recurrent_to_forget_eff_bias }
+        };
+        NEScheduler::get().schedule_op(_recurrent_to_forget_reduction.get(), Window::DimY, _recurrent_to_forget_reduction->window(), packRF);
+
+        ITensorPack packIC =
+        {
+            { TensorType::ACL_SRC, _input_to_cell_weights },
+            { TensorType::ACL_DST, &_input_to_cell_eff_bias }
+        };
+        NEScheduler::get().schedule_op(_input_to_cell_reduction.get(), Window::DimY, _input_to_cell_reduction->window(), packIC);
+
+        ITensorPack packRC =
+        {
+            { TensorType::ACL_SRC, _recurrent_to_cell_weights },
+            { TensorType::ACL_DST, &_recurrent_to_cell_eff_bias }
+        };
+        NEScheduler::get().schedule_op(_recurrent_to_cell_reduction.get(), Window::DimY, _recurrent_to_cell_reduction->window(), packRC);
+
+        ITensorPack packIO =
+        {
+            { TensorType::ACL_SRC, _input_to_output_weights },
+            { TensorType::ACL_DST, &_input_to_output_eff_bias }
+        };
+        NEScheduler::get().schedule_op(_input_to_output_reduction.get(), Window::DimY, _input_to_output_reduction->window(), packIO);
+
+        ITensorPack packRO =
+        {
+            { TensorType::ACL_SRC, _recurrent_to_output_weights },
+            { TensorType::ACL_DST, &_recurrent_to_output_eff_bias }
+        };
+        NEScheduler::get().schedule_op(_recurrent_to_output_reduction.get(), Window::DimY, _recurrent_to_output_reduction->window(), packRO);
 
         if(_has_projection)
         {
             _projection_eff_bias.allocator()->allocate();
-            NEScheduler::get().schedule(_projection_reduction.get(), Window::DimY);
+            ITensorPack pack =
+            {
+                { TensorType::ACL_SRC, _projection_weights },
+                { TensorType::ACL_DST, &_projection_eff_bias }
+            };
+            NEScheduler::get().schedule_op(_projection_reduction.get(), Window::DimY, _projection_reduction->window(), pack);
             if(_projection_bias != nullptr)
             {
                 _projection_bias_add.run();
diff --git a/src/runtime/cpu/operators/CpuGemmLowpMatrixMultiplyCore.cpp b/src/runtime/cpu/operators/CpuGemmLowpMatrixMultiplyCore.cpp
new file mode 100644
index 0000000..651ce43
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuGemmLowpMatrixMultiplyCore.cpp
@@ -0,0 +1,717 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/cpu/operators/CpuGemmLowpMatrixMultiplyCore.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/KernelDescriptors.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/MemoryHelpers.h"
+
+#include "src/core/cpu/kernels/CpuConvertQuantizedSignednessKernel.h"
+#include "src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h"
+#include "src/core/cpu/kernels/CpuGemmLowpMatrixMultiplyKernel.h"
+#include "src/core/cpu/kernels/CpuGemmLowpMatrixReductionKernel.h"
+#include "src/core/cpu/kernels/CpuGemmLowpOffsetContributionKernel.h"
+#include "src/core/cpu/kernels/CpuGemmLowpOffsetContributionOutputStageKernel.h"
+#include "src/core/cpu/kernels/CpuGemmTranspose1xWKernel.h"
+#include "src/runtime/cpu/operators/CpuActivation.h"
+#include "src/runtime/cpu/operators/internal/CpuGemmAssemblyDispatch.h"
+#include "src/runtime/cpu/utils/CpuAuxTensorHandler.h"
+
+using namespace arm_compute::misc::shape_calculator;
+using namespace arm_compute::experimental;
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace
+{
+cpu::AsmGemmInfo init_assembly_metadata(const GEMMInfo &info)
+{
+    cpu::AsmGemmInfo asm_info;
+    asm_info.method                  = cpu::AsmConvMethod::Im2Col;
+    asm_info.reinterpret_input_as_3d = info.reinterpret_input_as_3d();
+    asm_info.depth_output_gemm3d     = info.depth_output_gemm3d();
+    asm_info.activation_info         = info.activation_info();
+    asm_info.output_stage            = info.gemmlowp_output_stage();
+
+    return asm_info;
+}
+} // namespace
+
+CpuGemmLowpMatrixMultiplyCore::CpuGemmLowpMatrixMultiplyCore()
+    : _asm_glue(std::make_unique<CpuGemmAssemblyDispatch>()),
+      _mm_kernel(),
+      _mtx_a_reshape_kernel(),
+      _mtx_b_reshape_kernel(),
+      _mtx_a_reduction_kernel(),
+      _mtx_b_reduction_kernel(),
+      _offset_contribution_kernel(),
+      _offset_contribution_output_stage_kernel(),
+      _activation_func(),
+      _convert_to_signed_asymm(),
+      _convert_from_signed_asymm(),
+      _vector_sum_col(),
+      _vector_sum_row(),
+      _tmp_a(),
+      _tmp_b(),
+      _mm_result_s32(),
+      _signed_a(),
+      _signed_output(),
+      _a_offset(0),
+      _b_offset(0),
+      _run_vector_matrix_multiplication(false),
+      _assembly_path(false),
+      _fused_assembly_path(false),
+      _reshape_b_only_on_first_run(false),
+      _is_prepared(false),
+      _fuse_output_stage(false),
+      _run_activation(false),
+      _flip_signedness(false),
+      _gemm_info(),
+      _aux_mem(Count)
+{
+}
+CpuGemmLowpMatrixMultiplyCore::~CpuGemmLowpMatrixMultiplyCore() = default;
+
+void CpuGemmLowpMatrixMultiplyCore::configure(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, ITensorInfo *dst, const GEMMInfo &gemm_info)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, dst);
+    ARM_COMPUTE_ERROR_THROW_ON(CpuGemmLowpMatrixMultiplyCore::validate(a, b, c, dst, gemm_info));
+
+    const ITensorInfo *matrix_a = a;
+    const ITensorInfo *matrix_b = b;
+    GEMMInfo           info     = gemm_info;
+
+    // Set internal variables
+    _a_offset                         = a->quantization_info().uniform().offset;
+    _b_offset                         = b->quantization_info().uniform().offset;
+    _run_vector_matrix_multiplication = a->dimension(1) < 2;
+    _reshape_b_only_on_first_run      = info.reshape_b_only_on_first_run();
+    _is_prepared                      = false;
+    _fused_assembly_path              = false;
+    _flip_signedness                  = is_data_type_quantized_per_channel(b->data_type()) && (a->data_type() == DataType::QASYMM8) && _reshape_b_only_on_first_run;
+    _gemm_info                        = gemm_info;
+
+    _asm_glue = std::make_unique<cpu::CpuGemmAssemblyDispatch>();
+
+    const ITensorInfo *a_to_use = a;
+
+    // Convert QASYMM8 -> QASYMM8_SIGNED and back
+    if(_flip_signedness)
+    {
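+        // The LHS is converted to QASYMM8_SIGNED (and the result converted back to QASYMM8); quantization
+        // offsets and the output stage bounds are adjusted by 128 so the numerical result is unchanged.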
+        const int32_t                 offset_correction = 128;
+        const DataType                dt                = DataType::QASYMM8_SIGNED;
+        const UniformQuantizationInfo iqinfo            = a_to_use->quantization_info().uniform();
+
+        _signed_a                = a_to_use->clone()->set_data_type(dt).set_quantization_info(QuantizationInfo(iqinfo.scale, iqinfo.offset + offset_correction));
+        _convert_to_signed_asymm = std::make_unique<kernels::CpuConvertQuantizedSignednessKernel>();
+        _convert_to_signed_asymm->configure(a_to_use, &_signed_a);
+        a_to_use  = &_signed_a;
+        _a_offset = _signed_a.quantization_info().uniform().offset;
+
+        const UniformQuantizationInfo oqinfo = dst->quantization_info().uniform();
+        _signed_output                       = dst->clone()->set_data_type(dt).set_quantization_info(QuantizationInfo(oqinfo.scale, oqinfo.offset - offset_correction));
+
+        // Output stage correction
+        GEMMLowpOutputStageInfo output_stage_corr = info.gemmlowp_output_stage();
+        output_stage_corr.gemmlowp_offset         = _signed_output.quantization_info().uniform().offset;
+        output_stage_corr.gemmlowp_min_bound -= offset_correction;
+        output_stage_corr.gemmlowp_max_bound -= offset_correction;
+        info.set_gemmlowp_output_stage(output_stage_corr);
+
+        // Update matrix a
+        matrix_a = &_signed_a;
+    }
+
+    // If GEMMLowpOutputStage != NONE, fuse the offset contribution with the output stage
+    if(info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE)
+    {
+        _fuse_output_stage = true;
+        _mm_result_s32     = TensorInfo(dst->tensor_shape(), 1, DataType::S32);
+    }
+
+    // Initialize assembly kernel meta-data
+    const cpu::AsmGemmInfo asm_info = init_assembly_metadata(gemm_info);
+#ifdef __aarch64__
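+    // The assembly dispatch is only available on AArch64; elsewhere the generic kernels configured below are used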
+    switch(a->data_type())
+    {
+        case DataType::QASYMM8:
+        case DataType::QASYMM8_SIGNED:
+        case DataType::U8:
+        case DataType::S8:
+        {
+            if(is_data_type_quantized_asymmetric(a_to_use->data_type()) && info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
+            {
+                _asm_glue->configure(a_to_use, b, c, dst, asm_info);
+                _fused_assembly_path = _asm_glue->is_configured();
+            }
+            else
+            {
+                auto output_to_use = (_fuse_output_stage ? &_mm_result_s32 : dst);
+                _asm_glue->configure(a_to_use, b, nullptr, output_to_use, asm_info);
+            }
+            _assembly_path = _asm_glue->is_configured();
+            break;
+        }
+        default:
+        {
+            ARM_COMPUTE_ERROR("Datatype not supported");
+            break;
+        }
+    }
+#endif /* __aarch64__ */
+    if(!(_assembly_path || _run_vector_matrix_multiplication))
+    {
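+        // Generic (non-assembly) path: reshape LHS and RHS into the blocked layouts expected by CpuGemmLowpMatrixMultiplyKernel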
+        matrix_a = &_tmp_a;
+        matrix_b = &_tmp_b;
+
+        // The interleaved output matrix will have the following shape: [ a_width * 4, ceil(a_height / 4.0f) ]
+        _tmp_a = TensorInfo(compute_interleaved_shape(*a_to_use), 1, a_to_use->data_type(), a_to_use->quantization_info());
+        // The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width / 16.0f) ]
+        _tmp_b = TensorInfo(compute_transpose1xW_shape(*b), 1, b->data_type(), b->quantization_info());
+
+        // Configure interleave kernel
+        _mtx_a_reshape_kernel = std::make_unique<kernels::CpuGemmInterleave4x4Kernel>();
+        _mtx_a_reshape_kernel->configure(a_to_use, &_tmp_a);
+
+        // Configure transpose kernel
+        _mtx_b_reshape_kernel = std::make_unique<kernels::CpuGemmTranspose1xWKernel>();
+        _mtx_b_reshape_kernel->configure(b, &_tmp_b);
+    }
+
+    if(!_fused_assembly_path)
+    {
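+        // The offset contribution step needs the column sums of B when a_offset != 0 and the row sums of A when b_offset != 0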
+        // Build reduction info
+        const GEMMLowpReductionKernelInfo reduction_info(a_to_use->dimension(0), false, 0, false);
+
+        // Initialize matrix B reduction kernel only if _a_offset is not equal to 0
+        if(_a_offset != 0)
+        {
+            _vector_sum_col = TensorInfo(compute_reductionA_shape(*b), 1, DataType::S32);
+
+            // Configure Matrix B reduction kernel
+            _mtx_b_reduction_kernel = std::make_unique<kernels::CpuGemmLowpMatrixBReductionKernel>();
+            _mtx_b_reduction_kernel->configure(b, &_vector_sum_col, reduction_info);
+        }
+
+        // Initialize Matrix A reduction kernel only if _b_offset is not equal to 0
+        if(_b_offset != 0)
+        {
+            _vector_sum_row = TensorInfo(compute_reductionB_shape(*a_to_use), 1, DataType::S32);
+
+            // Configure matrix A reduction kernel
+            _mtx_a_reduction_kernel = std::make_unique<kernels::CpuGemmLowpMatrixAReductionKernel>();
+            _mtx_a_reduction_kernel->configure(a_to_use, &_vector_sum_row, reduction_info);
+        }
+
+        if(_fuse_output_stage)
+        {
+            // Configure matrix multiply kernel
+            if(!_assembly_path)
+            {
+                _mm_kernel = std::make_unique<kernels::CpuGemmLowpMatrixMultiplyKernel>();
+                _mm_kernel->configure(matrix_a, matrix_b, &_mm_result_s32);
+            }
+
+            _offset_contribution_output_stage_kernel = std::make_unique<kernels::CpuGemmLowpOffsetContributionOutputStageKernel>();
+            _offset_contribution_output_stage_kernel->configure(&_mm_result_s32,
+                                                                _a_offset == 0 ? nullptr : &_vector_sum_col,
+                                                                _b_offset == 0 ? nullptr : &_vector_sum_row, c,
+                                                                _flip_signedness ? &_signed_output : dst,
+                                                                a->dimension(0),
+                                                                _a_offset, _b_offset, info.gemmlowp_output_stage());
+
+            if(_flip_signedness)
+            {
+                _convert_from_signed_asymm = std::make_unique<kernels::CpuConvertQuantizedSignednessKernel>();
+                _convert_from_signed_asymm->configure(&_signed_output, dst);
+            }
+        }
+        else
+        {
+            // Configure matrix multiply kernel
+            if(!_assembly_path)
+            {
+                _mm_kernel = std::make_unique<kernels::CpuGemmLowpMatrixMultiplyKernel>();
+                _mm_kernel->configure(matrix_a, matrix_b, dst);
+            }
+            // Configure offset contribution kernel
+            _offset_contribution_kernel = std::make_unique<kernels::CpuGemmLowpOffsetContributionKernel>();
+            _offset_contribution_kernel->configure(dst, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, a_to_use->dimension(0),
+                                                   _a_offset, _b_offset);
+        }
+    }
+    // Configure activation
+    const ActivationLayerInfo &activation = gemm_info.activation_info();
+    _run_activation                       = activation.enabled() && (!_assembly_path || !cpu::CpuGemmAssemblyDispatch::is_activation_supported(activation));
+    if(_run_activation)
+    {
+        _activation_func = std::make_unique<CpuActivation>();
+        _activation_func->configure(dst, nullptr, activation);
+    }
+
+    if(_assembly_path)
+    {
+        auto asm_mem_req           = _asm_glue->workspace();
+        _aux_mem[AsmGemmWorkspace] = asm_mem_req[AsmGemmWorkspace];
+        _aux_mem[Pretranspose]     = asm_mem_req[Pretranspose];
+    }
+
+    // Request memory for the auxiliary tensors: reduction vectors, reshaped LHS/RHS, the intermediate S32
+    // result and the signedness-converted tensors. Buffers that depend only on the constant B are kept
+    // persistent when B is reshaped on the first run only, so they can be reused across calls to run().
+    const bool sum_col_is_persistent = !_fused_assembly_path && _a_offset != 0 && _reshape_b_only_on_first_run;
+    _aux_mem[VectorSumCol] = MemoryInfo(offset_int_vec(VectorSumCol), sum_col_is_persistent ? MemoryLifetime::Persistent : MemoryLifetime::Temporary, _vector_sum_col.total_size());
+    _aux_mem[VectorSumRow] = MemoryInfo(offset_int_vec(VectorSumRow), MemoryLifetime::Temporary, _vector_sum_row.total_size());
+    _aux_mem[TmpA]         = MemoryInfo(offset_int_vec(TmpA), MemoryLifetime::Temporary, _tmp_a.total_size());
+    _aux_mem[TmpB]         = MemoryInfo(offset_int_vec(TmpB), _reshape_b_only_on_first_run ? MemoryLifetime::Persistent : MemoryLifetime::Temporary, _tmp_b.total_size());
+    _aux_mem[MMResultS32]  = MemoryInfo(offset_int_vec(MMResultS32), MemoryLifetime::Temporary, _mm_result_s32.total_size());
+    _aux_mem[SignedA]      = MemoryInfo(offset_int_vec(SignedA), MemoryLifetime::Temporary, _signed_a.total_size());
+    _aux_mem[SignedOutput] = MemoryInfo(offset_int_vec(SignedOutput), MemoryLifetime::Temporary, _signed_output.total_size());
+}
+
+Status CpuGemmLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, const GEMMInfo &gemm_info)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(b, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8, DataType::QSYMM8_PER_CHANNEL);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(c != nullptr && gemm_info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::NONE, "Bias addition not supported in CpuGemmLowpMatrixMultiplyCore for output S32");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG((a)->dimension(0) != (b)->dimension(1),
+                                    "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported");
+
+    GEMMInfo           info          = gemm_info;
+    const ITensorInfo *matrix_a_info = a;
+    const ITensorInfo *matrix_b_info = b;
+
+    const ITensorInfo *a_to_use = a;
+
+    TensorInfo tmp_a_info{};
+    TensorInfo tmp_b_info{};
+    TensorInfo mm_result_s32_info{};
+
+    int32_t a_offset = a->quantization_info().uniform().offset;
+    int32_t b_offset = b->quantization_info().uniform().offset;
+
+    bool fuse_output_stage = info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE;
+    if(fuse_output_stage)
+    {
+        auto_init_if_empty(mm_result_s32_info, a->clone()->set_tensor_shape(output->tensor_shape()).set_data_type(DataType::S32));
+    }
+
+    // Convert QASYMM8->QASYMM8_SIGNED
+    TensorInfo signed_a{};
+    TensorInfo signed_output{};
+    bool       flip_signedness = is_data_type_quantized_per_channel(b->data_type()) && (a->data_type() == DataType::QASYMM8) && info.reshape_b_only_on_first_run();
+    if(flip_signedness)
+    {
+        const int32_t                 offset_correction = 128;
+        const DataType                dt                = DataType::QASYMM8_SIGNED;
+        const UniformQuantizationInfo iqinfo            = a_to_use->quantization_info().uniform();
+
+        signed_a = a_to_use->clone()->set_data_type(dt).set_quantization_info(QuantizationInfo(iqinfo.scale, iqinfo.offset + offset_correction));
+        ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuConvertQuantizedSignednessKernel::validate(a_to_use, &signed_a));
+        a_to_use = &signed_a;
+        a_offset = signed_a.quantization_info().uniform().offset;
+
+        const UniformQuantizationInfo oqinfo = output->quantization_info().uniform();
+        signed_output                        = output->clone()->set_data_type(dt).set_quantization_info(QuantizationInfo(oqinfo.scale, oqinfo.offset - offset_correction));
+
+        // Output stage correction
+        GEMMLowpOutputStageInfo output_stage_corr = info.gemmlowp_output_stage();
+        output_stage_corr.gemmlowp_offset         = signed_output.quantization_info().uniform().offset;
+        output_stage_corr.gemmlowp_min_bound -= offset_correction;
+        output_stage_corr.gemmlowp_max_bound -= offset_correction;
+        info.set_gemmlowp_output_stage(output_stage_corr);
+
+        // Update matrix a
+        matrix_a_info = &signed_a;
+    }
+
+    // Initialize assembly kernel meta-data
+    const AsmGemmInfo asm_info = init_assembly_metadata(info);
+
+    // Check if we need to run the optimized assembly kernel
+    bool run_optimised             = false;
+    bool run_optimised_requantized = false;
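+    // run_optimised_requantized means the requantization (output stage) is fused inside the assembly kernel,
+    // so the separate reduction and offset-contribution kernels are not needed.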
+    if(is_data_type_quantized_asymmetric(a_to_use->data_type()) && info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
+    {
+        run_optimised             = bool(CpuGemmAssemblyDispatch::validate(a_to_use, b, c, output, asm_info));
+        run_optimised_requantized = run_optimised;
+    }
+    else
+    {
+        run_optimised = bool(CpuGemmAssemblyDispatch::validate(a_to_use, b, nullptr, fuse_output_stage ? &mm_result_s32_info : output, asm_info));
+    }
+
+    if(run_optimised)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON(b->dimension(0) != output->dimension(0));
+        if(info.depth_output_gemm3d() != 0)
+        {
+            if(info.reinterpret_input_as_3d())
+            {
+                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1));
+                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(2) != output->dimension(2));
+            }
+            else
+            {
+                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1) * output->dimension(2));
+            }
+        }
+        else
+        {
+            ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1));
+        }
+    }
+    else
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.reinterpret_input_as_3d(), "CpuGemmLowpMatrixMultiplyCore cannot reinterpret the input tensor as 3D");
+        ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.depth_output_gemm3d() != 0, "CpuGemmLowpMatrixMultiplyCore cannot reinterpret the output tensor as 3D");
+
+        const bool run_vector_matrix_multiplication = a->dimension(1) < 2;
+        if(!run_vector_matrix_multiplication)
+        {
+            matrix_a_info = &tmp_a_info;
+            matrix_b_info = &tmp_b_info;
+
+            // The interleaved output matrix will have the following shape: [ a_width * 4, ceil(a_height / 4.0f) ]
+            TensorShape shape_tmp_a = a->tensor_shape();
+            shape_tmp_a.set(0, a->dimension(0) * 4);
+            shape_tmp_a.set(1, std::ceil(a->dimension(1) / 4.f));
+
+            // The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width / 16.0f) ]
+            TensorShape shape_tmp_b = b->tensor_shape();
+            shape_tmp_b.set(0, b->dimension(1) * 16);
+            shape_tmp_b.set(1, std::ceil(b->dimension(0) / 16.f));
+
+            // Validate interleave kernel
+            auto_init_if_empty(tmp_a_info, a_to_use->clone()->set_tensor_shape(shape_tmp_a));
+            auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(shape_tmp_b));
+
+            ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuGemmInterleave4x4Kernel::validate(a_to_use, &tmp_a_info));
+            ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuGemmTranspose1xWKernel::validate(b, &tmp_b_info));
+        }
+    }
+
+    if(!run_optimised_requantized)
+    {
+        TensorInfo info_vector_sum_col{};
+        TensorInfo info_vector_sum_row{};
+
+        const GEMMLowpReductionKernelInfo reduction_info(a_to_use->dimension(0), false, 0, false);
+
+        // Validate matrix B reduction kernel only if _a_offset is not equal to 0
+        if(a_offset != 0)
+        {
+            info_vector_sum_col = TensorInfo(compute_reductionA_shape(*b), 1, DataType::S32);
+
+            // Configure Matrix B reduction kernel
+            ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuGemmLowpMatrixBReductionKernel::validate(b, &info_vector_sum_col, reduction_info));
+        }
+
+        // Validate Matrix A reduction kernel only if _b_offset is not equal to 0
+        if(b_offset != 0)
+        {
+            info_vector_sum_row = TensorInfo(compute_reductionB_shape(*a), 1, DataType::S32);
+
+            // Configure matrix A reduction kernel
+            ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuGemmLowpMatrixAReductionKernel::validate(a_to_use, &info_vector_sum_row, reduction_info));
+        }
+
+        if(fuse_output_stage)
+        {
+            if(!run_optimised)
+            {
+                ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.reinterpret_input_as_3d(), "CpuGemmLowpMatrixMultiplyKernel cannot reinterpret the input tensor as 3D");
+                ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.depth_output_gemm3d() != 0, "CpuGemmLowpMatrixMultiplyKernel cannot reinterpret the output tensor as 3D");
+
+                ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuGemmLowpMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, &mm_result_s32_info));
+            }
+
+            // Validate offset contribution kernel
+            ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuGemmLowpOffsetContributionOutputStageKernel::validate(&mm_result_s32_info,
+                                                                                                          a_offset == 0 ? nullptr : &info_vector_sum_col,
+                                                                                                          b_offset == 0 ? nullptr : &info_vector_sum_row,
+                                                                                                          c,
+                                                                                                          flip_signedness ? &signed_output : output,
+                                                                                                          a_offset, b_offset,
+                                                                                                          info.gemmlowp_output_stage()));
+        }
+        else
+        {
+            if(!run_optimised)
+            {
+                ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.reinterpret_input_as_3d(), "CpuGemmLowpMatrixMultiplyKernel cannot reinterpret the input tensor as 3D");
+                ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.depth_output_gemm3d() != 0, "CpuGemmLowpMatrixMultiplyKernel cannot reinterpret the output tensor as 3D");
+
+                ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuGemmLowpMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, output));
+            }
+            // Validate offset contribution kernel
+            ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuGemmLowpOffsetContributionKernel::validate(output,
+                                                                                               a_offset == 0 ? nullptr : &info_vector_sum_col,
+                                                                                               b_offset == 0 ? nullptr : &info_vector_sum_row,
+                                                                                               a_offset, b_offset));
+        }
+    }
+
+    // Validate activation
+    const ActivationLayerInfo &activation = gemm_info.activation_info();
+    if(activation.enabled())
+    {
+        ARM_COMPUTE_RETURN_ON_ERROR(CpuActivation::validate(output, nullptr, activation));
+    }
+
+    return Status{};
+}
+
+void CpuGemmLowpMatrixMultiplyCore::run(ITensorPack &tensors)
+{
+    prepare(tensors);
+    auto a        = tensors.get_const_tensor(TensorType::ACL_SRC_0);
+    auto b        = tensors.get_const_tensor(TensorType::ACL_SRC_1);
+    auto c        = tensors.get_const_tensor(TensorType::ACL_SRC_2);
+    auto dst      = tensors.get_tensor(TensorType::ACL_DST);
+    auto a_to_use = a;
+    auto matrix_a = a;
+    auto matrix_b = b;
+
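+    // Bind the auxiliary tensors provided through the tensor pack to this operator's tensor infos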
+    CpuAuxTensorHandler vector_sum_col(offset_int_vec(VectorSumCol), _vector_sum_col, tensors, false);
+    CpuAuxTensorHandler vector_sum_row(offset_int_vec(VectorSumRow), _vector_sum_row, tensors, false);
+    CpuAuxTensorHandler tmp_a(offset_int_vec(TmpA), _tmp_a, tensors, false);
+    CpuAuxTensorHandler tmp_b(offset_int_vec(TmpB), _tmp_b, tensors, true);
+    CpuAuxTensorHandler mm_result_s32(offset_int_vec(MMResultS32), _mm_result_s32, tensors, false);
+    CpuAuxTensorHandler signed_a(offset_int_vec(SignedA), _signed_a, tensors, false);
+    CpuAuxTensorHandler signed_output(offset_int_vec(SignedOutput), _signed_output, tensors, false);
+
+    // Convert QASYMM8->QASYMM8_SIGNED
+    if(_flip_signedness)
+    {
+        ITensorPack pack =
+        {
+            { TensorType::ACL_SRC, a },
+            { TensorType::ACL_DST, signed_a.get() }
+        };
+        NEScheduler::get().schedule_op(_convert_to_signed_asymm.get(), Window::DimY, _convert_to_signed_asymm->window(), pack);
+        a_to_use = signed_a.get();
+    }
+
+    // Run GEMM
+    if(_asm_glue->is_configured())
+    {
+        ITensorPack asm_glue_tensors = tensors;
+        auto        output_to_use    = (_fuse_output_stage ? mm_result_s32.get() : dst);
+        if(is_data_type_quantized_asymmetric(a_to_use->info()->data_type()) && _gemm_info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
+        {
+            asm_glue_tensors.add_const_tensor(TensorType::ACL_SRC_0, a_to_use);
+            asm_glue_tensors.add_const_tensor(TensorType::ACL_SRC_1, b);
+            asm_glue_tensors.add_const_tensor(TensorType::ACL_SRC_2, c);
+            asm_glue_tensors.add_tensor(TensorType::ACL_DST, dst);
+        }
+        else
+        {
+            asm_glue_tensors.add_const_tensor(TensorType::ACL_SRC_0, a_to_use);
+            asm_glue_tensors.add_const_tensor(TensorType::ACL_SRC_1, b);
+            asm_glue_tensors.add_tensor(TensorType::ACL_DST, output_to_use);
+        }
+        _asm_glue->run(asm_glue_tensors);
+    }
+    else
+    {
+        if(!_run_vector_matrix_multiplication)
+        {
+            matrix_a = tmp_a.get();
+            matrix_b = tmp_b.get();
+            // Run interleave kernel
+            ITensorPack pack_a =
+            {
+                { TensorType::ACL_SRC, a_to_use },
+                { TensorType::ACL_DST, tmp_a.get() }
+            };
+            NEScheduler::get().schedule_op(_mtx_a_reshape_kernel.get(), Window::DimY, _mtx_a_reshape_kernel->window(), pack_a);
+
+            if(!_reshape_b_only_on_first_run)
+            {
+                ITensorPack pack_b =
+                {
+                    { TensorType::ACL_SRC, b },
+                    { TensorType::ACL_DST, tmp_b.get() }
+                };
+                // Run transpose kernel
+                NEScheduler::get().schedule_op(_mtx_b_reshape_kernel.get(), Window::DimY, _mtx_b_reshape_kernel->window(), pack_b);
+            }
+        }
+        ITensorPack pack_mm =
+        {
+            { TensorType::ACL_SRC_0, matrix_a },
+            { TensorType::ACL_SRC_1, matrix_b }
+        };
+        if(_fuse_output_stage)
+        {
+            pack_mm.add_tensor(TensorType::ACL_DST, mm_result_s32.get());
+        }
+        else
+        {
+            pack_mm.add_tensor(TensorType::ACL_DST, dst);
+        }
+        NEScheduler::get().schedule_op(_mm_kernel.get(), Window::DimY, _mm_kernel->window(), pack_mm);
+    }
+
+    if(!_fused_assembly_path)
+    {
+        // Run matrix A reduction kernel only if _b_offset is not equal to 0
+        if(_b_offset != 0)
+        {
+            ITensorPack pack =
+            {
+                { TensorType::ACL_SRC, a_to_use },
+                { TensorType::ACL_DST, vector_sum_row.get() }
+            };
+            NEScheduler::get().schedule_op(_mtx_a_reduction_kernel.get(), Window::DimX, _mtx_a_reduction_kernel->window(), pack);
+        }
+
+        // Run matrix B reduction kernel only if _a_offset is not equal to 0
+        if(_a_offset != 0 && !_reshape_b_only_on_first_run)
+        {
+            ITensorPack pack =
+            {
+                { TensorType::ACL_SRC, b },
+                { TensorType::ACL_DST, vector_sum_col.get() }
+            };
+            NEScheduler::get().schedule_op(_mtx_b_reduction_kernel.get(), Window::DimX, _mtx_b_reduction_kernel->window(), pack);
+        }
+
+        if(_fuse_output_stage)
+        {
+            ITensorPack pack;
+            pack.add_tensor(TensorType::ACL_SRC_0, mm_result_s32.get());
+            pack.add_tensor(TensorType::ACL_SRC_1, _a_offset == 0 ? nullptr : vector_sum_col.get());
+            pack.add_tensor(TensorType::ACL_SRC_2, _b_offset == 0 ? nullptr : vector_sum_row.get());
+            pack.add_tensor(TensorType::ACL_SRC_3, c);
+            pack.add_tensor(TensorType::ACL_DST, _flip_signedness ? signed_output.get() : dst);
+
+            // Run offset contribution kernel
+            NEScheduler::get().schedule_op(_offset_contribution_output_stage_kernel.get(), Window::DimY, _offset_contribution_output_stage_kernel->window(), pack);
+        }
+        else
+        {
+            ITensorPack pack;
+            pack.add_tensor(TensorType::ACL_SRC_0, _a_offset == 0 ? nullptr : vector_sum_col.get());
+            pack.add_tensor(TensorType::ACL_SRC_1, _b_offset == 0 ? nullptr : vector_sum_row.get());
+            pack.add_tensor(TensorType::ACL_DST, dst);
+
+            // Run offset contribution kernel
+            NEScheduler::get().schedule_op(_offset_contribution_kernel.get(), Window::DimY, _offset_contribution_kernel->window(), pack);
+        }
+    }
+
+    // Convert QASYMM8_SIGNED->QASYMM8
+    if(!_fused_assembly_path && _fuse_output_stage && _flip_signedness)
+    {
+        ITensorPack pack =
+        {
+            { TensorType::ACL_SRC, signed_output.get() },
+            { TensorType::ACL_DST, dst }
+        };
+        NEScheduler::get().schedule_op(_convert_from_signed_asymm.get(), Window::DimY, _convert_from_signed_asymm->window(), pack);
+    }
+
+    // Run fused activation unless already run in the fused assembly
+    if(_run_activation)
+    {
+        ITensorPack pack =
+        {
+            { TensorType::ACL_SRC, dst },
+            { TensorType::ACL_DST, dst }
+        };
+        _activation_func->run(pack);
+    }
+}
+
+void CpuGemmLowpMatrixMultiplyCore::prepare(ITensorPack &tensors)
+{
+    if(!_is_prepared)
+    {
+        auto original_b = tensors.get_const_tensor(TensorType::ACL_SRC_1);
+        // Run assembly reshape
+        if(_asm_glue->is_configured())
+        {
+            _asm_glue->prepare(tensors);
+
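+            // If any auxiliary buffer is persistent, the assembly path has stored its own reshaped copy of B,
+            // so the original weights tensor can be marked as unused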
+            auto has_reshape = std::find_if(_aux_mem.begin(),
+                                            _aux_mem.end(),
+                                            [](const MemoryInfo & m) -> bool { return m.lifetime == MemoryLifetime::Persistent; });
+
+            if(has_reshape != std::end(_aux_mem))
+            {
+                original_b->mark_as_unused();
+            }
+        }
+        // Run non-assembly reshape
+        else if(_reshape_b_only_on_first_run && !_run_vector_matrix_multiplication)
+        {
+            // Run reshape kernel and mark original weights tensor as unused
+            ITensor            *tmp_b_p = utils::cast::polymorphic_downcast<ITensor *>(tensors.get_tensor(offset_int_vec(TmpB)));
+            CpuAuxTensorHandler tmp_b(_tmp_b, *tmp_b_p);
+            ITensorPack         pack =
+            {
+                { TensorType::ACL_SRC, original_b },
+                { TensorType::ACL_DST, tmp_b.get() }
+            };
+            NEScheduler::get().schedule_op(_mtx_b_reshape_kernel.get(), Window::DimY, _mtx_b_reshape_kernel->window(), pack);
+        }
+
+        // Run matrix B reduction kernel only if _a_offset is not equal to 0
+        if(!_fused_assembly_path && _a_offset != 0 && _reshape_b_only_on_first_run)
+        {
+            ITensor            *vector_sum_col_p = utils::cast::polymorphic_downcast<ITensor *>(tensors.get_tensor(offset_int_vec(VectorSumCol)));
+            CpuAuxTensorHandler vector_sum_col(_vector_sum_col, *vector_sum_col_p);
+            ITensorPack         pack =
+            {
+                { TensorType::ACL_SRC, original_b },
+                { TensorType::ACL_DST, vector_sum_col.get() }
+            };
+            NEScheduler::get().schedule_op(_mtx_b_reduction_kernel.get(), Window::DimX, _mtx_b_reduction_kernel->window(), pack);
+        }
+        _is_prepared = true;
+    }
+}
+experimental::MemoryRequirements CpuGemmLowpMatrixMultiplyCore::workspace() const
+{
+    return _aux_mem;
+}
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/runtime/cpu/operators/CpuGemmLowpMatrixMultiplyCore.h b/src/runtime/cpu/operators/CpuGemmLowpMatrixMultiplyCore.h
new file mode 100644
index 0000000..1d0e470
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuGemmLowpMatrixMultiplyCore.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_GEMMLOWP_MATRIXMULTIPLY_CORE_H
+#define ARM_COMPUTE_CPU_GEMMLOWP_MATRIXMULTIPLY_CORE_H
+
+#include "arm_compute/core/TensorInfo.h"
+#include "src/core/common/Macros.h"
+#include "src/runtime/cpu/ICpuOperator.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+class CpuGemmInterleave4x4Kernel;
+class CpuGemmLowpMatrixMultiplyKernel;
+class CpuGemmLowpOffsetContributionKernel;
+class CpuGemmLowpOffsetContributionOutputStageKernel;
+class CpuGemmLowpMatrixAReductionKernel;
+class CpuGemmLowpMatrixBReductionKernel;
+class CpuGemmTranspose1xWKernel;
+class CpuConvertQuantizedSignednessKernel;
+} // namespace kernels
+class CpuGemmAssemblyDispatch;
+class CpuActivation;
+
+/** Basic function to execute GEMMLowpMatrixMultiplyCore. This function calls the following kernels if the DOT product instruction is not available:
+ *
+ *  -# @ref kernels::CpuGemmInterleave4x4Kernel
+ *  -# @ref kernels::CpuGemmTranspose1xWKernel
+ *  -# @ref kernels::CpuGemmLowpMatrixMultiplyKernel
+ *  -# @ref kernels::CpuGemmLowpOffsetContributionKernel
+ *  -# @ref CpuActivation
+ *
+ * otherwise if the DOT product instruction is available:
+ *
+ *  -# @ref kernels::CpuGemmLowpOffsetContributionKernel
+ *
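+ * A minimal configure/run sketch is shown below. The shapes, quantization parameters and the handling of
+ * the auxiliary workspace reported by workspace() are assumptions of this example rather than requirements:
+ *
+ * @code{.cpp}
+ * // dst[8, 16] = a[32, 16] * b[8, 32], S32 destination (no output stage)
+ * TensorInfo a_info(TensorShape(32U, 16U), 1, DataType::QASYMM8, QuantizationInfo(0.5f, 10));
+ * TensorInfo b_info(TensorShape(8U, 32U), 1, DataType::QASYMM8, QuantizationInfo(0.25f, 5));
+ * TensorInfo dst_info(TensorShape(8U, 16U), 1, DataType::S32);
+ *
+ * CpuGemmLowpMatrixMultiplyCore gemm;
+ * ARM_COMPUTE_ERROR_THROW_ON(CpuGemmLowpMatrixMultiplyCore::validate(&a_info, &b_info, nullptr, &dst_info));
+ * gemm.configure(&a_info, &b_info, nullptr, &dst_info);
+ *
+ * // Back the infos with actual tensors before running
+ * Tensor a, b, dst;
+ * a.allocator()->init(a_info);
+ * b.allocator()->init(b_info);
+ * dst.allocator()->init(dst_info);
+ * a.allocator()->allocate();
+ * b.allocator()->allocate();
+ * dst.allocator()->allocate();
+ *
+ * ITensorPack pack = { { TensorType::ACL_SRC_0, &a }, { TensorType::ACL_SRC_1, &b }, { TensorType::ACL_DST, &dst } };
+ * gemm.run(pack);
+ * @endcode
+ *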
+ */
+class CpuGemmLowpMatrixMultiplyCore : public ICpuOperator
+{
+public:
+    /** Constructor */
+    CpuGemmLowpMatrixMultiplyCore();
+    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmLowpMatrixMultiplyCore);
+    /** Destructor */
+    ~CpuGemmLowpMatrixMultiplyCore();
+    /** Initialise the operator's inputs and output
+     *
+     * Valid data layouts:
+     * - NHWC
+     * - NCHW
+     *
+     * Valid data type configurations:
+     * |src0           |src1               |src2     |dst            |
+     * |:--------------|:------------------|:--------|:--------------|
+     * |QASYMM8        |QASYMM8            |S32      |QASYMM8        |
+     * |QASYMM8        |QSYMM8_PER_CHANNEL |S32      |QASYMM8        |
+     * |QASYMM8        |QSYMM8             |S32      |QASYMM8        |
+     * |QASYMM8        |QASYMM8            |S32      |S32            |
+     * |QASYMM8        |QSYMM8_PER_CHANNEL |S32      |S32            |
+     * |QASYMM8        |QSYMM8             |S32      |S32            |
+     * |QASYMM8_SIGNED |QASYMM8_SIGNED     |S32      |QASYMM8_SIGNED |
+     * |QASYMM8_SIGNED |QSYMM8_PER_CHANNEL |S32      |QASYMM8_SIGNED |
+     * |QASYMM8_SIGNED |QSYMM8             |S32      |QASYMM8_SIGNED |
+     * |QASYMM8_SIGNED |QASYMM8_SIGNED     |S32      |S32            |
+     * |QASYMM8_SIGNED |QSYMM8_PER_CHANNEL |S32      |S32            |
+     * |QASYMM8_SIGNED |QSYMM8             |S32      |S32            |
+     *
+     * @note GEMM_LOWP:  low precision GEMM kernel
+     *  This kernel performs the following computations:
+     *
+     *  -# Convert a values from QASYMM8 to int32 and add a_offset to each of them.
+     *  -# Convert b values from QASYMM8 to int32 and add b_offset to each of them.
+     *  -# Compute the matrix product of the resulting a * b in int32.
+     *
+     * @note The @p dst type is S32 if gemm_info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::NONE. It is QASYMM8/QASYMM8_SIGNED otherwise
+     *
+     * @param[in]  a         First input tensor info (Matrix A). Data type supported: QASYMM8/QASYMM8_SIGNED.
+     * @param[in]  b         Second input tensor info (Matrix B). Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL.
+     * @param[in]  c         Third input tensor info (Matrix C). It can be a nullptr. Data type supported: S32
+     * @param[out] dst       Output tensor info. Data type supported: S32/QASYMM8/QASYMM8_SIGNED
+     * @param[in]  gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
+     *                       if the reshape of matrix B should be executed only for the first run
+     */
+    void configure(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, ITensorInfo *dst, const GEMMInfo &gemm_info = GEMMInfo());
+    /** Static function to check if given info will lead to a valid configuration
+     *
+     * Similar to CpuGemmLowpMatrixMultiplyCore::configure()
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *dst, const GEMMInfo &gemm_info = GEMMInfo());
+
+    // Inherited methods overridden:
+    void run(ITensorPack &tensors) override;
+    void prepare(ITensorPack &tensors) override;
+    experimental::MemoryRequirements workspace() const override;
+
+private:
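+    // Slots of the auxiliary tensors reported through workspace()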
+    enum AuxTensorIdx
+    {
+        AsmGemmWorkspace = 0,
+        Pretranspose,
+        VectorSumCol,
+        VectorSumRow,
+        TmpA,
+        TmpB,
+        MMResultS32,
+        SignedA,
+        SignedOutput,
+        Count
+    };
+
+    std::unique_ptr<CpuGemmAssemblyDispatch>                                 _asm_glue;
+    std::unique_ptr<kernels::CpuGemmLowpMatrixMultiplyKernel>                _mm_kernel;
+    std::unique_ptr<kernels::CpuGemmInterleave4x4Kernel>                     _mtx_a_reshape_kernel;
+    std::unique_ptr<kernels::CpuGemmTranspose1xWKernel>                      _mtx_b_reshape_kernel;
+    std::unique_ptr<kernels::CpuGemmLowpMatrixAReductionKernel>              _mtx_a_reduction_kernel;
+    std::unique_ptr<kernels::CpuGemmLowpMatrixBReductionKernel>              _mtx_b_reduction_kernel;
+    std::unique_ptr<kernels::CpuGemmLowpOffsetContributionKernel>            _offset_contribution_kernel;
+    std::unique_ptr<kernels::CpuGemmLowpOffsetContributionOutputStageKernel> _offset_contribution_output_stage_kernel;
+    std::unique_ptr<CpuActivation>                                           _activation_func;
+    std::unique_ptr<kernels::CpuConvertQuantizedSignednessKernel>            _convert_to_signed_asymm;
+    std::unique_ptr<kernels::CpuConvertQuantizedSignednessKernel>            _convert_from_signed_asymm;
+
+    TensorInfo _vector_sum_col;
+    TensorInfo _vector_sum_row;
+    TensorInfo _tmp_a;
+    TensorInfo _tmp_b;
+    TensorInfo _mm_result_s32;
+    TensorInfo _signed_a;
+    TensorInfo _signed_output;
+    int32_t    _a_offset;
+    int32_t    _b_offset;
+
+    bool                             _run_vector_matrix_multiplication;
+    bool                             _assembly_path;
+    bool                             _fused_assembly_path;
+    bool                             _reshape_b_only_on_first_run;
+    bool                             _is_prepared;
+    bool                             _fuse_output_stage;
+    bool                             _run_activation;
+    bool                             _flip_signedness;
+    GEMMInfo                         _gemm_info;
+    experimental::MemoryRequirements _aux_mem{};
+};
+} // namespace cpu
+} // namespace arm_compute
+#endif /*ARM_COMPUTE_CPU_GEMMLOWP_MATRIXMULTIPLY_CORE_H */
diff --git a/src/runtime/gpu/cl/utils/ClAuxTensorHandler.h b/src/runtime/gpu/cl/utils/ClAuxTensorHandler.h
index 152e3c6..1cf717c 100644
--- a/src/runtime/gpu/cl/utils/ClAuxTensorHandler.h
+++ b/src/runtime/gpu/cl/utils/ClAuxTensorHandler.h
@@ -41,6 +41,10 @@
     CLAuxTensorHandler(int slot_id, TensorInfo &info, ITensorPack &pack, bool pack_inject = false)
         : _tensor()
     {
+        if(info.total_size() == 0)
+        {
+            return;
+        }
         _tensor.allocator()->soft_init(info);
 
         ICLTensor *packed_tensor = utils::cast::polymorphic_downcast<ICLTensor *>(pack.get_tensor(slot_id));