COMPMID-797 Integrate MobileNet QASYMM8 with the new graph.

Change-Id: I4df63ec2f4eb27a8a6eec2bea27741bf8dec6910
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/126966
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
diff --git a/src/core/TensorInfo.cpp b/src/core/TensorInfo.cpp
index 539d0f8..676938a 100644
--- a/src/core/TensorInfo.cpp
+++ b/src/core/TensorInfo.cpp
@@ -322,7 +322,7 @@
 {
     _data_type = data_type;
     _format    = Format::UNKNOWN;
-    return *this;
+    return set_tensor_shape(tensor_shape()); // Force total size and strides to update
 }
 
 ITensorInfo &TensorInfo::set_num_channels(int num_channels)
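The one-line fix above is needed because a tensor's data type determines its element size: before this change, set_data_type() updated _data_type but left the total size and strides computed for the old type. Returning through set_tensor_shape(tensor_shape()) reuses the existing shape handling to recompute both. A minimal sketch of the failure mode, using a toy type and hypothetical sizes rather than the library's internals:

    #include <cstddef>

    // Sketch: why strides/total size must be recomputed when the element size
    // changes. "element_size" stands in for the per-DataType size the real
    // TensorInfo derives internally.
    struct MiniInfo
    {
        size_t num_elements;
        size_t element_size; // e.g. 4 for F32, 1 for QASYMM8
        size_t total_size() const { return num_elements * element_size; }
    };

    int main()
    {
        MiniInfo info{ 224 * 224 * 3, 4 }; // declared as F32
        info.element_size = 1;             // re-typed to QASYMM8
        // Without recomputation, a cached total size would still reflect the
        // 4-byte layout; set_tensor_shape(tensor_shape()) forces the refresh.
        return info.total_size() == 224 * 224 * 3 ? 0 : 1;
    }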
diff --git a/src/graph/GraphBuilder.cpp b/src/graph/GraphBuilder.cpp
index 0d1bdc3..4ad34e7 100644
--- a/src/graph/GraphBuilder.cpp
+++ b/src/graph/GraphBuilder.cpp
@@ -206,7 +206,9 @@
 NodeID GraphBuilder::add_convolution_node(Graph &g, NodeParams params, NodeIdxPair input,
                                           Size2D kernel_spatial_extend, unsigned int depth, PadStrideInfo conv_info,
                                           unsigned int num_groups, ConvolutionMethod method,
-                                          ITensorAccessorUPtr weights_accessor, ITensorAccessorUPtr bias_accessor)
+                                          ITensorAccessorUPtr weights_accessor, ITensorAccessorUPtr bias_accessor,
+                                          const QuantizationInfo weights_quant_info,
+                                          const QuantizationInfo out_quant_info)
 {
     CHECK_NODEIDX_PAIR(input, g);
     ARM_COMPUTE_ERROR_ON(depth == 0);
@@ -220,7 +222,13 @@
     // Create weights node
     TensorDescriptor w_desc = input_tensor_desc;
     w_desc.shape            = TensorShape(kernel_spatial_extend.width, kernel_spatial_extend.height, w_desc.shape.z() / num_groups, depth);
-    NodeID w_nid            = add_const_node_with_name(g, params, "Weights", w_desc, std::move(weights_accessor));
+
+    if(!weights_quant_info.empty())
+    {
+        w_desc.quant_info = weights_quant_info;
+    }
+
+    NodeID w_nid = add_const_node_with_name(g, params, "Weights", w_desc, std::move(weights_accessor));
 
     // Create bias nodes
     NodeID b_nid = EmptyNodeID;
@@ -234,7 +242,7 @@
     if(num_groups == 1)
     {
         // Create convolution node and connect
-        NodeID conv_nid = g.add_node<ConvolutionLayerNode>(conv_info, method);
+        NodeID conv_nid = g.add_node<ConvolutionLayerNode>(conv_info, method, out_quant_info);
         g.add_connection(input.node_id, input.index, conv_nid, 0);
         g.add_connection(w_nid, 0, conv_nid, 1);
         if(has_bias)
@@ -270,7 +278,7 @@
 
 NodeID GraphBuilder::add_depthwise_convolution_node(Graph &g, NodeParams params, NodeIdxPair input, Size2D kernel_spatial_extend, PadStrideInfo conv_info,
                                                     DepthwiseConvolutionMethod method,
-                                                    ITensorAccessorUPtr weights_accessor, ITensorAccessorUPtr bias_accessor)
+                                                    ITensorAccessorUPtr weights_accessor, ITensorAccessorUPtr bias_accessor, const QuantizationInfo quant_info)
 {
     CHECK_NODEIDX_PAIR(input, g);
     ARM_COMPUTE_ERROR_ON((kernel_spatial_extend.width == 0) || (kernel_spatial_extend.height == 0));
@@ -283,7 +291,13 @@
     // Create weights node
     TensorDescriptor w_desc = input_tensor_desc;
     w_desc.shape            = TensorShape(kernel_spatial_extend.width, kernel_spatial_extend.height, w_desc.shape.z());
-    NodeID w_nid            = add_const_node_with_name(g, params, "Weights", w_desc, std::move(weights_accessor));
+
+    if(!quant_info.empty())
+    {
+        w_desc.quant_info = quant_info;
+    }
+
+    NodeID w_nid = add_const_node_with_name(g, params, "Weights", w_desc, std::move(weights_accessor));
 
     // Create bias nodes
     NodeID b_nid = EmptyNodeID;
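Both builder changes above follow the same convention: the new QuantizationInfo parameters default to an empty value, so existing callers compile unchanged, while a non-empty value overrides the quantization otherwise inherited from the input descriptor. A minimal model of that pattern with stand-in types (QuantizationInfo::empty() is the real API; everything else here is illustrative):

    // "Empty means not provided": only an explicit override replaces the
    // quantization info copied from the input tensor's descriptor.
    struct QuantInfo
    {
        float scale  = 0.f;
        int   offset = 0;
        bool  empty() const { return scale == 0.f && offset == 0; }
    };

    struct Descriptor
    {
        QuantInfo quant_info;
    };

    Descriptor make_weights_desc(Descriptor inherited, QuantInfo override_qi)
    {
        if(!override_qi.empty())
        {
            inherited.quant_info = override_qi; // explicit per-tensor info wins
        }
        return inherited;                       // otherwise keep the input's
    }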
diff --git a/src/graph/backends/CL/CLDeviceBackend.cpp b/src/graph/backends/CL/CLDeviceBackend.cpp
index f10eb33..92cb693 100644
--- a/src/graph/backends/CL/CLDeviceBackend.cpp
+++ b/src/graph/backends/CL/CLDeviceBackend.cpp
@@ -126,7 +126,7 @@
     ARM_COMPUTE_ERROR_ON(tensor_desc.target != Target::CL);
 
     // Create backend tensor handle
-    TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type);
+    TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type, tensor_desc.quant_info);
     auto       backend_tensor_handle = support::cpp14::make_unique<CLTensorHandle>(info);
 
     return std::move(backend_tensor_handle);
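This one-line change, mirrored in the GLES and NEON backends further down, is what actually carries scale and offset onto the backend tensor; the four-argument TensorInfo constructor used here is existing core API. A short sketch of what the old call lost:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"

    using namespace arm_compute;

    // Without forwarding tensor_desc.quant_info, a QASYMM8 handle is built
    // with a default (empty) QuantizationInfo, so every kernel reading it
    // would see scale 0 / offset 0.
    TensorInfo make_handle_info(const TensorShape &shape, DataType dt,
                                const QuantizationInfo &qi)
    {
        return TensorInfo(shape, 1 /* num_channels */, dt, qi);
    }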
diff --git a/src/graph/backends/CL/CLFunctionsFactory.cpp b/src/graph/backends/CL/CLFunctionsFactory.cpp
index 1b448fe..ad73a79 100644
--- a/src/graph/backends/CL/CLFunctionsFactory.cpp
+++ b/src/graph/backends/CL/CLFunctionsFactory.cpp
@@ -154,10 +154,16 @@
     ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
 
     // Extract IO and info
-    ICLTensor              *input          = get_backing_tensor(node.input(0));
-    ICLTensor              *weights        = get_backing_tensor(node.input(1));
-    ICLTensor              *biases         = get_backing_tensor(node.input(2));
-    ICLTensor              *output         = get_backing_tensor(node.output(0));
+    ICLTensor *input   = get_backing_tensor(node.input(0));
+    ICLTensor *weights = get_backing_tensor(node.input(1));
+    ICLTensor *biases  = get_backing_tensor(node.input(2));
+    ICLTensor *output  = get_backing_tensor(node.output(0));
+
+    if(is_data_type_quantized_asymmetric(input->info()->data_type()))
+    {
+        biases->info()->set_data_type(DataType::S32);
+    }
+
     const PadStrideInfo     conv_info      = node.convolution_info();
     const ConvolutionMethod conv_algorithm = node.convolution_method();
 
@@ -190,6 +196,8 @@
     // Log info
     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
                                << " Data Type: " << input->info()->data_type()
+                               << " Input QuantInfo: " << input->info()->quantization_info()
+                               << " Weights QuantInfo: " << weights->info()->quantization_info()
                                << " Input shape: " << input->info()->tensor_shape()
                                << " Weights shape: " << weights->info()->tensor_shape()
                                << " Output shape: " << output->info()->tensor_shape()
@@ -251,10 +259,16 @@
     ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
 
     // Extract IO and info
-    ICLTensor                       *input         = get_backing_tensor(node.input(0));
-    ICLTensor                       *weights       = get_backing_tensor(node.input(1));
-    ICLTensor                       *biases        = get_backing_tensor(node.input(2));
-    ICLTensor                       *output        = get_backing_tensor(node.output(0));
+    ICLTensor *input   = get_backing_tensor(node.input(0));
+    ICLTensor *weights = get_backing_tensor(node.input(1));
+    ICLTensor *biases  = get_backing_tensor(node.input(2));
+    ICLTensor *output  = get_backing_tensor(node.output(0));
+
+    if(is_data_type_quantized_asymmetric(input->info()->data_type()))
+    {
+        biases->info()->set_data_type(DataType::S32);
+    }
+
     const PadStrideInfo              conv_info     = node.convolution_info();
     const DepthwiseConvolutionMethod dwc_algorithm = node.depthwise_convolution_method();
 
@@ -275,6 +289,8 @@
     // Log info
     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
                                << " Data Type: " << input->info()->data_type()
+                               << " Input QuantInfo: " << input->info()->quantization_info()
+                               << " Weights QuantInfo: " << weights->info()->quantization_info()
                                << " Input shape: " << input->info()->tensor_shape()
                                << " Weights shape: " << weights->info()->tensor_shape()
                                << " Output shape: " << output->info()->tensor_shape()
diff --git a/src/graph/backends/GLES/GCDeviceBackend.cpp b/src/graph/backends/GLES/GCDeviceBackend.cpp
index 8cd9994..a55215f 100644
--- a/src/graph/backends/GLES/GCDeviceBackend.cpp
+++ b/src/graph/backends/GLES/GCDeviceBackend.cpp
@@ -87,7 +87,7 @@
     ARM_COMPUTE_ERROR_ON(tensor_desc.target != Target::GC);
 
     // Create backend tensor handle
-    TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type);
+    TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type, tensor_desc.quant_info);
     auto       backend_tensor_handle = support::cpp14::make_unique<GCTensorHandle>(info);
 
     return std::move(backend_tensor_handle);
diff --git a/src/graph/backends/GLES/GCFunctionsFactory.cpp b/src/graph/backends/GLES/GCFunctionsFactory.cpp
index 12e7c04..d3c5737 100644
--- a/src/graph/backends/GLES/GCFunctionsFactory.cpp
+++ b/src/graph/backends/GLES/GCFunctionsFactory.cpp
@@ -154,10 +154,16 @@
     ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
 
     // Extract IO and info
-    IGCTensor              *input          = get_backing_tensor(node.input(0));
-    IGCTensor              *weights        = get_backing_tensor(node.input(1));
-    IGCTensor              *biases         = get_backing_tensor(node.input(2));
-    IGCTensor              *output         = get_backing_tensor(node.output(0));
+    IGCTensor *input   = get_backing_tensor(node.input(0));
+    IGCTensor *weights = get_backing_tensor(node.input(1));
+    IGCTensor *biases  = get_backing_tensor(node.input(2));
+    IGCTensor *output  = get_backing_tensor(node.output(0));
+
+    if(is_data_type_quantized_asymmetric(input->info()->data_type()))
+    {
+        biases->info()->set_data_type(DataType::S32);
+    }
+
     const PadStrideInfo     conv_info      = node.convolution_info();
     const ConvolutionMethod conv_algorithm = node.convolution_method();
 
@@ -180,6 +186,8 @@
     // Log info
     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
                                << " Data Type: " << input->info()->data_type()
+                               << " Input QuantInfo: " << input->info()->quantization_info()
+                               << " Weights QuantInfo: " << weights->info()->quantization_info()
                                << " Input shape: " << input->info()->tensor_shape()
                                << " Weights shape: " << weights->info()->tensor_shape()
                                << " Output shape: " << output->info()->tensor_shape()
@@ -241,10 +249,16 @@
     ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
 
     // Extract IO and info
-    IGCTensor                       *input         = get_backing_tensor(node.input(0));
-    IGCTensor                       *weights       = get_backing_tensor(node.input(1));
-    IGCTensor                       *biases        = get_backing_tensor(node.input(2));
-    IGCTensor                       *output        = get_backing_tensor(node.output(0));
+    IGCTensor *input   = get_backing_tensor(node.input(0));
+    IGCTensor *weights = get_backing_tensor(node.input(1));
+    IGCTensor *biases  = get_backing_tensor(node.input(2));
+    IGCTensor *output  = get_backing_tensor(node.output(0));
+
+    if(is_data_type_quantized_asymmetric(input->info()->data_type()))
+    {
+        biases->info()->set_data_type(DataType::S32);
+    }
+
     const PadStrideInfo              conv_info     = node.convolution_info();
     const DepthwiseConvolutionMethod dwc_algorithm = node.depthwise_convolution_method();
 
@@ -264,6 +278,8 @@
     // Log info
     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
                                << " Data Type: " << input->info()->data_type()
+                               << " Input QuantInfo: " << input->info()->quantization_info()
+                               << " Weights QuantInfo: " << weights->info()->quantization_info()
                                << " Input shape: " << input->info()->tensor_shape()
                                << " Weights shape: " << weights->info()->tensor_shape()
                                << " Output shape: " << output->info()->tensor_shape()
diff --git a/src/graph/backends/NEON/NEDeviceBackend.cpp b/src/graph/backends/NEON/NEDeviceBackend.cpp
index aaf0582..9123196 100644
--- a/src/graph/backends/NEON/NEDeviceBackend.cpp
+++ b/src/graph/backends/NEON/NEDeviceBackend.cpp
@@ -93,7 +93,7 @@
     ARM_COMPUTE_ERROR_ON(tensor_desc.target != Target::NEON);
 
     // Create backend tensor handle
-    TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type);
+    TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type, tensor_desc.quant_info);
     auto       backend_tensor_handle = support::cpp14::make_unique<NETensorHandle>(info);
 
     return std::move(backend_tensor_handle);
diff --git a/src/graph/backends/NEON/NEFunctionFactory.cpp b/src/graph/backends/NEON/NEFunctionFactory.cpp
index 228af9c..906378c 100644
--- a/src/graph/backends/NEON/NEFunctionFactory.cpp
+++ b/src/graph/backends/NEON/NEFunctionFactory.cpp
@@ -140,10 +140,16 @@
     ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
 
     // Extract IO and info
-    ITensor                *input          = get_backing_tensor(node.input(0));
-    ITensor                *weights        = get_backing_tensor(node.input(1));
-    ITensor                *biases         = get_backing_tensor(node.input(2));
-    ITensor                *output         = get_backing_tensor(node.output(0));
+    ITensor *input   = get_backing_tensor(node.input(0));
+    ITensor *weights = get_backing_tensor(node.input(1));
+    ITensor *biases  = get_backing_tensor(node.input(2));
+    ITensor *output  = get_backing_tensor(node.output(0));
+
+    if(is_data_type_quantized_asymmetric(input->info()->data_type()))
+    {
+        biases->info()->set_data_type(DataType::S32);
+    }
+
     const PadStrideInfo     conv_info      = node.convolution_info();
     const ConvolutionMethod conv_algorithm = node.convolution_method();
 
@@ -175,6 +181,8 @@
     // Log info
     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
                                << " Data Type: " << input->info()->data_type()
+                               << " Input QuantInfo: " << input->info()->quantization_info()
+                               << " Weights QuantInfo: " << weights->info()->quantization_info()
                                << " Input shape: " << input->info()->tensor_shape()
                                << " Weights shape: " << weights->info()->tensor_shape()
                                << " Output shape: " << output->info()->tensor_shape()
@@ -234,10 +242,16 @@
     ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
 
     // Extract IO and info
-    ITensor                         *input         = get_backing_tensor(node.input(0));
-    ITensor                         *weights       = get_backing_tensor(node.input(1));
-    ITensor                         *biases        = get_backing_tensor(node.input(2));
-    ITensor                         *output        = get_backing_tensor(node.output(0));
+    ITensor *input   = get_backing_tensor(node.input(0));
+    ITensor *weights = get_backing_tensor(node.input(1));
+    ITensor *biases  = get_backing_tensor(node.input(2));
+    ITensor *output  = get_backing_tensor(node.output(0));
+
+    if(is_data_type_quantized_asymmetric(input->info()->data_type()))
+    {
+        biases->info()->set_data_type(DataType::S32);
+    }
+
     const PadStrideInfo              conv_info     = node.convolution_info();
     const DepthwiseConvolutionMethod dwc_algorithm = node.depthwise_convolution_method();
 
@@ -258,6 +272,8 @@
     // Log info
     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
                                << " Data Type: " << input->info()->data_type()
+                               << " Input QuantInfo: " << input->info()->quantization_info()
+                               << " Weights QuantInfo: " << weights->info()->quantization_info()
                                << " Input shape: " << input->info()->tensor_shape()
                                << " Weights shape: " << weights->info()->tensor_shape()
                                << " Output shape: " << output->info()->tensor_shape()
diff --git a/src/graph/nodes/ConvolutionLayerNode.cpp b/src/graph/nodes/ConvolutionLayerNode.cpp
index 4617284..eb0c6a1 100644
--- a/src/graph/nodes/ConvolutionLayerNode.cpp
+++ b/src/graph/nodes/ConvolutionLayerNode.cpp
@@ -31,8 +31,8 @@
 {
 namespace graph
 {
-ConvolutionLayerNode::ConvolutionLayerNode(PadStrideInfo info, ConvolutionMethod method)
-    : _info(std::move(info)), _method(method)
+ConvolutionLayerNode::ConvolutionLayerNode(PadStrideInfo info, ConvolutionMethod method, QuantizationInfo out_quant_info)
+    : _info(std::move(info)), _method(method), _out_quant_info(out_quant_info)
 {
     _input_edges.resize(3, EmptyEdgeID);
     _outputs.resize(1, NullTensorID);
@@ -90,6 +90,12 @@
     TensorDescriptor output_info  = src->desc();
     TensorShape      output_shape = compute_output_shape(src->desc().shape, weights->desc().shape, _info);
     output_info.shape             = output_shape;
+
+    if(!_out_quant_info.empty())
+    {
+        output_info.quant_info = _out_quant_info;
+    }
+
     return output_info;
 }
 
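configure_output() starts from a copy of the source descriptor, so without the override above a QASYMM8 convolution output would inherit the input's scale and offset even though convolution changes the real-valued range. The override lets the frontend supply parameters derived from the layer's actual output range; for reference, the standard asymmetric 8-bit derivation looks like this (hypothetical helper, values not from the patch):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <utility>

    // Standard asymmetric 8-bit parameters for a real range [rmin, rmax].
    std::pair<float, int32_t> quant_params(float rmin, float rmax)
    {
        rmin = std::min(rmin, 0.f); // asymmetric ranges must include zero
        rmax = std::max(rmax, 0.f);
        const float scale  = (rmax > rmin) ? (rmax - rmin) / 255.f : 1.f;
        const auto  offset = static_cast<int32_t>(std::lround(-rmin / scale));
        return { scale, std::clamp(offset, 0, 255) };
    }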
diff --git a/src/graph/nodes/SoftmaxLayerNode.cpp b/src/graph/nodes/SoftmaxLayerNode.cpp
index 4c21ac6..b6241e6 100644
--- a/src/graph/nodes/SoftmaxLayerNode.cpp
+++ b/src/graph/nodes/SoftmaxLayerNode.cpp
@@ -63,7 +63,10 @@
     const Tensor *src = input(0);
     ARM_COMPUTE_ERROR_ON(src == nullptr);
 
-    return src->desc();
+    TensorDescriptor out_desc = src->desc();
+    out_desc.quant_info       = QuantizationInfo(1.f / 256.f, 0);
+
+    return out_desc;
 }
 
 Status SoftmaxLayerNode::validate()
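Unlike convolution, softmax needs no per-model override: its outputs are probabilities in [0, 1), so scale 1/256 with zero offset maps that interval onto the full 8-bit range. A worked sketch of the mapping (hypothetical helper), where e.g. p = 0.5 quantizes to 128:

    #include <cmath>
    #include <cstdint>

    // q = round(p / (1/256)) = round(p * 256), saturated to [0, 255].
    uint8_t quantize_softmax_output(float p)
    {
        const float scale = 1.f / 256.f;
        const float q     = std::round(p / scale);
        return static_cast<uint8_t>(q > 255.f ? 255.f : (q < 0.f ? 0.f : q));
    }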
diff --git a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
index 5f747bc..711b006 100644
--- a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
@@ -102,7 +102,10 @@
         matrix_b = &_tmp_b;
 
         _memory_group.manage(&_tmp_a);
-        _memory_group.manage(&_tmp_b);
+        if(!_reshape_b_only_on_first_run)
+        {
+            _memory_group.manage(&_tmp_b);
+        }
 
         // Configure interleave kernel
         _mtx_a_reshape_kernel.configure(a, &_tmp_a, mult_interleave4x4_height);
@@ -119,7 +122,10 @@
     {
         TensorInfo info_vector_sum_col(compute_reductionA_shape(*b->info()), 1, DataType::S32);
         _vector_sum_col.allocator()->init(info_vector_sum_col);
-        _memory_group.manage(&_vector_sum_col);
+        if(!_reshape_b_only_on_first_run)
+        {
+            _memory_group.manage(&_vector_sum_col);
+        }
 
         // Configure Matrix B reduction kernel
         _mtx_b_reduction_kernel.configure(b, &_vector_sum_col);
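The two guards above (the same change is applied to NEGEMM and NEGEMMLowpMatrixMultiplyCore below) exist because of a lifetime rule: tensors handed to the memory group are only valid for the duration of one run and are recycled afterwards, while a reshaped B and its column-sum reduction computed on the first run must survive into every later run, so those buffers have to own their memory. A toy model of the rule, with hypothetical types rather than the library's MemoryGroup:

    #include <cassert>
    #include <vector>

    // Managed storage is recycled after every run; persistent storage is not.
    struct Pool
    {
        std::vector<float> scratch; // backs all group-managed tensors
        void acquire(size_t n) { scratch.assign(n, 0.f); }
        void release() { scratch.clear(); } // managed contents die here
    };

    int main()
    {
        Pool pool;
        std::vector<float> reshaped_b(16, 1.f); // persistent across runs

        for(int run = 0; run < 2; ++run)
        {
            pool.acquire(16); // per-run managed buffers
            // ... use pool.scratch and reshaped_b ...
            pool.release();
        }
        assert(reshaped_b[0] == 1.f); // still valid on the second run
        return 0;
    }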
diff --git a/src/runtime/NEON/functions/NEGEMM.cpp b/src/runtime/NEON/functions/NEGEMM.cpp
index 18e6e91..e0859be 100644
--- a/src/runtime/NEON/functions/NEGEMM.cpp
+++ b/src/runtime/NEON/functions/NEGEMM.cpp
@@ -107,7 +107,10 @@
 
             // Manage intermediate buffers
             _memory_group.manage(&_tmp_a);
-            _memory_group.manage(&_tmp_b);
+            if(!_reshape_b_only_on_first_run)
+            {
+                _memory_group.manage(&_tmp_b);
+            }
 
             int m = a->info()->dimension(1);
             int n = b->info()->dimension(0);
diff --git a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
index 7f25c2e..3c48d69 100644
--- a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
@@ -109,14 +109,6 @@
         ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
     }
 
-    // Checks performed when biases are present
-    if(append_bias)
-    {
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
-        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(3));
-        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
-    }
-
     if(transpose1xW)
     {
         TensorInfo weights_reshaped = weights->clone()->set_tensor_shape(get_reshaped_weights_shape(weights, append_bias));
@@ -344,7 +336,7 @@
     _memory_group.manage(&_input_im2col_reshaped);
 
     // Create tensor (interleave) to prepare input tensor for GEMM
-    if(!_is_fully_connected_convolution && !run_optimised)
+    if(!_is_fully_connected_convolution && !run_optimised && _is_interleaved)
     {
         TensorShape shape_interleaved(shape_im2col);
         shape_interleaved.set(0, shape_interleaved.x() * 4);
@@ -362,7 +354,9 @@
     TensorInfo info_gemm(shape_gemm, 1, gemm_data_type, input->info()->fixed_point_position());
     info_gemm.set_quantization_info(output->info()->quantization_info());
     _gemm_output.allocator()->init(info_gemm);
-    _memory_group.manage(&_gemm_output);
+
+    // FIXME: enabling the memory manager for _gemm_output gives incorrect results (possibly related to the assembly kernel used by GEMMLowp?)
+    //  _memory_group.manage(&_gemm_output);
 
     // Configure kernels
     // Configure im2col
@@ -491,7 +485,7 @@
         reshaped_weights->set_tensor_shape(get_reshaped_weights_shape_conv(weights, append_bias, is_fully_connected_convolution));
         ARM_COMPUTE_RETURN_ON_ERROR(NEConvolutionLayerReshapeWeights::validate(weights, biases, reshaped_weights.get(), !is_fully_connected_convolution /* 1xW transpose */));
     }
-    else
+    else if(!is_quantized)
     {
         TensorShape reshaped_weights_shape;
 
diff --git a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
index 7372c6c..cbec73f 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
@@ -43,19 +43,19 @@
 NEGEMMLowpMatrixMultiplyCore::NEGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager)
     : _memory_group(std::move(memory_manager)), _asm_glue_unsigned(), _asm_glue_signed(), _mm_kernel(nullptr), _mtx_a_reshape_kernel(nullptr), _mtx_b_reshape_kernel(nullptr), _mtx_a_reduction_kernel(),
       _mtx_b_reduction_kernel(), _offset_contribution_kernel(), _vector_sum_col(), _vector_sum_row(), _tmp_a(), _tmp_b(), _workspace(), _a_offset(0), _b_offset(0), _run_vector_matrix_multiplication(false),
-      _dot_product_path(false)
+      _dot_product_path(false), _is_first_run(true), _reshape_b_only_on_first_run(false)
 {
 }
 
 void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b, ITensor *output, const GEMMInfo &gemm_info)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, output);
-    ARM_COMPUTE_UNUSED(gemm_info);
     ARM_COMPUTE_ERROR_THROW_ON(NEGEMMLowpMatrixMultiplyCore::validate(a->info(), b->info(), output->info(), gemm_info));
 
     _a_offset                         = a->info()->quantization_info().offset;
     _b_offset                         = b->info()->quantization_info().offset;
     _run_vector_matrix_multiplication = a->info()->dimension(1) < 2;
+    _reshape_b_only_on_first_run      = gemm_info.reshape_b_only_on_first_run();
 
 #ifdef __aarch64__
     switch(a->info()->data_type())
@@ -98,7 +98,10 @@
             _tmp_a.allocator()->init(info_a);
             _tmp_b.allocator()->init(info_b);
             _memory_group.manage(&_tmp_a);
-            _memory_group.manage(&_tmp_b);
+            if(!_reshape_b_only_on_first_run)
+            {
+                _memory_group.manage(&_tmp_b);
+            }
 
             // Configure interleave kernel
             {
@@ -129,7 +132,10 @@
         TensorInfo info_vector_sum_col(compute_reductionA_shape(*b->info()), 1, DataType::S32);
 
         _vector_sum_col.allocator()->init(info_vector_sum_col);
-        _memory_group.manage(&_vector_sum_col);
+        if(!_reshape_b_only_on_first_run)
+        {
+            _memory_group.manage(&_vector_sum_col);
+        }
 
         // Configure Matrix B reduction kernel
         _mtx_b_reduction_kernel.configure(b, &_vector_sum_col, a->info()->dimension(0), false);
@@ -252,7 +258,7 @@
             NEScheduler::get().schedule(_mtx_a_reshape_kernel.get(), Window::DimY);
         }
 
-        if(_mtx_b_reshape_kernel)
+        if(_mtx_b_reshape_kernel && (_is_first_run || !_reshape_b_only_on_first_run))
         {
             NEScheduler::get().schedule(_mtx_b_reshape_kernel.get(), Window::DimY);
         }
@@ -278,7 +284,7 @@
     }
 
     // Run matrix B reduction kernel only if _a_offset is not equal to 0
-    if(_a_offset != 0)
+    if(_a_offset != 0 && (_is_first_run || !_reshape_b_only_on_first_run))
     {
         NEScheduler::get().schedule(&_mtx_b_reduction_kernel, Window::DimX);
     }
@@ -287,4 +293,6 @@
     NEScheduler::get().schedule(&_offset_contribution_kernel, Window::DimY);
 
     _memory_group.release();
+
+    _is_first_run = false;
 }
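This is the run-side counterpart of the configure changes: B's reshape and reduction kernels are scheduled on the first invocation only, and _is_first_run is cleared unconditionally at the end of run(), so the guards degrade to the old per-run behaviour whenever the flag is off. For completeness, a hedged usage sketch of how a caller opts in, assuming the three-flag GEMMInfo constructor (is_a_reshaped, is_b_reshaped, reshape_b_only_on_first_run):

    #include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    // B holds constant weights: reshape it once, then reuse the reshaped copy
    // (and its reduction) on every subsequent run.
    void configure_for_constant_b(NEGEMMLowpMatrixMultiplyCore &gemmlowp,
                                  const Tensor *a, const Tensor *b, Tensor *out)
    {
        const GEMMInfo info(false /* is_a_reshaped */,
                            false /* is_b_reshaped */,
                            true /* reshape_b_only_on_first_run */);
        gemmlowp.configure(a, b, out, info);
    }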