Remove OpenCL padding: CLReductionOperationKernel

Change the parallel implementation across the X axis: every thread now computes one row
Add missing test for MEAN_SUM
Make reduction on any axis != 0 work with num_channels > 1

Resolve COMPMID-3917

Signed-off-by: Giorgio Arena <giorgio.arena@arm.com>
Change-Id: Ib0f99540104e3c253bcd1ea637833db533f5e76e
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5522
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
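
The change to the X-axis path replaces the old local-memory tree reduction, which required border padding on the input width (border_val = 64), with one work-item per row: a vectorized main loop plus a scalar tail for the leftover elements. A minimal C++ sketch of the new per-row scheme, assuming float data and the SUM operation; `reduce_row_sum` is an illustrative name, and the inner loop stands in for the kernel's `VLOAD`/`SUM_REDUCE` pair:

```cpp
#include <cstdio>

// Sketch of the one-row-per-thread X reduction: a vectorized main loop
// (VLOAD + SUM_REDUCE in the kernel) plus a scalar tail for WIDTH % VEC_SIZE.
float reduce_row_sum(const float *row, int width, int vec_size)
{
    float acc = 0.f;
    int   x   = 0;
    for(; x <= width - vec_size; x += vec_size)
        for(int i = 0; i < vec_size; ++i) // stands in for SUM_REDUCE on one vload
            acc += row[x + i];
    for(; x < width; ++x) // leftover loop, compiled only if WIDTH % VEC_SIZE != 0
        acc += row[x];
    return acc;
}

int main()
{
    const float row[10] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
    std::printf("%f\n", reduce_row_sum(row, 10, 4)); // 55
}
```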
diff --git a/src/core/CL/cl_kernels/helpers.h b/src/core/CL/cl_kernels/helpers.h
index 2eae5ee..6cd7637 100644
--- a/src/core/CL/cl_kernels/helpers.h
+++ b/src/core/CL/cl_kernels/helpers.h
@@ -567,6 +567,16 @@
 #define SUM_REDUCE_STR(x, size) sum_reduce_##size(x)
 #define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size)
 
+#define prod_reduce_1(x) (x)
+#define prod_reduce_2(x) ((x).s0) * ((x).s1)
+#define prod_reduce_3(x) prod_reduce_2((x).s01) * ((x).s2)
+#define prod_reduce_4(x) prod_reduce_2((x).s01) * prod_reduce_2((x).s23)
+#define prod_reduce_8(x) prod_reduce_4((x).s0123) * prod_reduce_4((x).s4567)
+#define prod_reduce_16(x) prod_reduce_8((x).s01234567) * prod_reduce_8((x).s89ABCDEF)
+
+#define PROD_REDUCE_STR(x, size) prod_reduce_##size(x)
+#define PROD_REDUCE(x, size) PROD_REDUCE_STR(x, size)
+
 #define max_reduce_1(x) (x)
 #define max_reduce_2(x) max(((x).s0), ((x).s1))
 #define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2))
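
The new `PROD_REDUCE` macros mirror the existing `SUM_REDUCE`/`MAX_REDUCE` families: each size splits the vector into halves and multiplies the partial products. A C++ sketch of the same pairwise scheme, with `prod_reduce` as an illustrative stand-in for the macro family:

```cpp
#include <array>
#include <cstddef>
#include <cstdio>

// Pairwise product, mirroring prod_reduce_{1,2,3,4,8,16}: split the span
// in halves until scalars remain, then multiply the partial products.
template <typename T, std::size_t N>
T prod_reduce(const std::array<T, N> &v, std::size_t lo = 0, std::size_t len = N)
{
    if(len == 1)
        return v[lo];
    const std::size_t half = len / 2;
    return prod_reduce(v, lo, half) * prod_reduce(v, lo + half, len - half);
}

int main()
{
    std::array<float, 4> v{ 2.f, 3.f, 4.f, 5.f };
    std::printf("%f\n", prod_reduce(v)); // 120, as PROD_REDUCE(v, 4) would give
}
```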
diff --git a/src/core/CL/cl_kernels/helpers_asymm.h b/src/core/CL/cl_kernels/helpers_asymm.h
index 27878cd..562c5d3 100644
--- a/src/core/CL/cl_kernels/helpers_asymm.h
+++ b/src/core/CL/cl_kernels/helpers_asymm.h
@@ -425,9 +425,22 @@
 QUANTIZE_IMPL(char, 1)
 QUANTIZE_IMPL(uint, 1)
 QUANTIZE_IMPL(int, 1)
+QUANTIZE_IMPL(uchar, 2)
+QUANTIZE_IMPL(char, 2)
+QUANTIZE_IMPL(uint, 2)
+QUANTIZE_IMPL(int, 2)
+QUANTIZE_IMPL(uchar, 3)
+QUANTIZE_IMPL(char, 3)
+QUANTIZE_IMPL(uint, 3)
+QUANTIZE_IMPL(int, 3)
 QUANTIZE_IMPL(uchar, 4)
 QUANTIZE_IMPL(ushort, 4)
 QUANTIZE_IMPL(short, 4)
+QUANTIZE_IMPL(int, 4)
+QUANTIZE_IMPL(uchar, 8)
+QUANTIZE_IMPL(char, 8)
+QUANTIZE_IMPL(uint, 8)
+QUANTIZE_IMPL(int, 8)
 QUANTIZE_IMPL(uchar, 16)
 QUANTIZE_IMPL(char, 16)
 QUANTIZE_IMPL(ushort, 16)
@@ -439,9 +452,22 @@
 DEQUANTIZE_IMPL(char, 1)
 DEQUANTIZE_IMPL(uint, 1)
 DEQUANTIZE_IMPL(int, 1)
+DEQUANTIZE_IMPL(uchar, 2)
+DEQUANTIZE_IMPL(char, 2)
+DEQUANTIZE_IMPL(uint, 2)
+DEQUANTIZE_IMPL(int, 2)
+DEQUANTIZE_IMPL(uchar, 3)
+DEQUANTIZE_IMPL(char, 3)
+DEQUANTIZE_IMPL(uint, 3)
+DEQUANTIZE_IMPL(int, 3)
 DEQUANTIZE_IMPL(uchar, 4)
 DEQUANTIZE_IMPL(ushort, 4)
 DEQUANTIZE_IMPL(short, 4)
+DEQUANTIZE_IMPL(int, 4)
+DEQUANTIZE_IMPL(uchar, 8)
+DEQUANTIZE_IMPL(char, 8)
+DEQUANTIZE_IMPL(uint, 8)
+DEQUANTIZE_IMPL(int, 8)
 DEQUANTIZE_IMPL(uchar, 16)
 DEQUANTIZE_IMPL(char, 16)
 DEQUANTIZE_IMPL(ushort, 16)
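
The extra `QUANTIZE_IMPL`/`DEQUANTIZE_IMPL` instantiations let the quantized PROD path run at any `VEC_SIZE` rather than a fixed 16. The underlying affine mapping is unchanged; a scalar C++ sketch of the assumed round-and-clamp behaviour (names are illustrative; the CL macros generate the equivalent logic per vector width):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Affine (de)quantization as used around the quantized PROD accumulation:
// real = (q - OFFSET) * SCALE, and the inverse with rounding and saturation.
float dequantize(uint8_t q, int offset, float scale)
{
    return (static_cast<int>(q) - offset) * scale;
}

uint8_t quantize(float v, int offset, float scale)
{
    const long q = std::lround(v / scale) + offset;
    return static_cast<uint8_t>(std::clamp<long>(q, 0, 255));
}

int main()
{
    const float r = dequantize(130, 128, 0.5f); // 1.0
    std::printf("%f -> %d\n", r, quantize(r, 128, 0.5f));
}
```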
diff --git a/src/core/CL/cl_kernels/reduction_operation.cl b/src/core/CL/cl_kernels/reduction_operation.cl
index b2e5692..912b6c9 100644
--- a/src/core/CL/cl_kernels/reduction_operation.cl
+++ b/src/core/CL/cl_kernels/reduction_operation.cl
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -32,67 +32,18 @@
 #define ISGREATER(x, y) (x > y) ? 1 : 0
 #define ISLESS(x, y) (x < y) ? 1 : 0
 #else // !defined(WIDTH)
-#define ISGREATER(x, y) select((int16)0, (int16)-1, x > y)
-#define ISLESS(x, y) select((int16)0, (int16)-1, x < y)
+#define ISGREATER(x, y) select((VEC_DATA_TYPE(int, VEC_SIZE))0, (VEC_DATA_TYPE(int, VEC_SIZE)) - 1, x > y)
+#define ISLESS(x, y) select((VEC_DATA_TYPE(int, VEC_SIZE))0, (VEC_DATA_TYPE(int, VEC_SIZE)) - 1, x < y)
 #endif // defined(WIDTH)
 #endif // defined(FLOAT_DATA_TYPE)
 
-/** Calculate square sum of a vector
- *
- * @param[in] input Pointer to the first pixel.
- *
- * @return square sum of vector.
- */
-inline DATA_TYPE square_sum(__global const DATA_TYPE *input)
-{
-    VEC_DATA_TYPE(DATA_TYPE, 16)
-    in = vload16(0, input);
-
-    in *= in;
-
-    in.s01234567 += in.s89ABCDEF;
-    in.s0123 += in.s4567;
-    in.s01 += in.s23;
-
-    return (in.s0 + in.s1);
-}
-
-/** Calculate sum of a vector
- *
- * @param[in] input Pointer to the first pixel.
- *
- * @return sum of vector.
- */
-inline DATA_TYPE sum(__global const DATA_TYPE *input)
-{
-    VEC_DATA_TYPE(DATA_TYPE, 16)
-    in = vload16(0, input);
-
-    in.s01234567 += in.s89ABCDEF;
-    in.s0123 += in.s4567;
-    in.s01 += in.s23;
-
-    return (in.s0 + in.s1);
-}
-
-/** Calculate product of a vector
- *
- * @param[in] input Pointer to the first pixel.
- *
- * @return product of vector.
- */
-inline DATA_TYPE product(__global const DATA_TYPE *input)
-{
-    VEC_DATA_TYPE(DATA_TYPE, 16)
-    in = vload16(0, input);
-
-    in.s01234567 *= in.s89ABCDEF;
-    in.s0123 *= in.s4567;
-    in.s01 *= in.s23;
-
-    return (in.s0 * in.s1);
-}
+#if defined(WIDTH)
 #if defined(OPERATION)
+
+#define sum(in0, in1, size) (in0 + SUM_REDUCE(in1, size))
+#define square_sum(in0, in1, size) (in0 + SUM_REDUCE((in1 * in1), size))
+#define product(in0, in1, size) (in0 * PROD_REDUCE(in1, size))
+
 /** This kernel performs parallel reduction given an operation on x-axis.
  *
  * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
@@ -101,65 +52,57 @@
  * @note The product flag must be passed at compile time using -DPROD if we want to compute the product, otherwise sum will be used
  * @note The width size must be passed at compile time using -DWIDTH e.g. -DWIDTH=128 if we want to compute the mean value
  *
- * @param[in] src_ptr                                   Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_stride_x                              Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x                                src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y                              Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y                                src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes         The offset of the first element in the source tensor
- * @param[in] partial_res_ptr                           The local buffer to hold partial result values. Supported data types: same as @p src_ptr
- * @param[in] partial_res_stride_x                      Stride of the output tensor in X dimension (in bytes)
- * @param[in] partial_res_step_x                        partial_res_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] partial_res_stride_y                      Stride of the output tensor in Y dimension (in bytes)
- * @param[in] partial_res_step_y                        partial_res_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] partial_res_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] local_results                             Local buffer for storing the partial result
+ * @param[in] input_ptr                            Pointer to the source tensor. Supported data types: F16/F32
+ * @param[in] input_stride_x                       Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x                         input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y                       Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y                         input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes  The offset of the first element in the source tensor
+ * @param[in] output_ptr                           Pointer to the destination tensor. Supported data types: same as @p input
+ * @param[in] output_stride_x                      Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x                        output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y                      Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y                        output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
  */
 __kernel void reduction_operation_x(
-    IMAGE_DECLARATION(src),
-    IMAGE_DECLARATION(partial_res),
-    __local DATA_TYPE *local_results)
+    IMAGE_DECLARATION(input),
+    IMAGE_DECLARATION(output))
 {
-    Image src         = CONVERT_TO_IMAGE_STRUCT(src);
-    Image partial_res = CONVERT_TO_IMAGE_STRUCT(partial_res);
+    int y = get_global_id(1);
 
-    unsigned int lsize = get_local_size(0);
-    unsigned int lid   = get_local_id(0);
+    __global uchar *input_addr  = input_ptr + input_offset_first_element_in_bytes + y * input_stride_y;
+    __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + y * output_stride_y;
 
-    for(unsigned int y = 0; y < get_local_size(1); ++y)
-    {
-        local_results[lid] = OPERATION((__global DATA_TYPE *)offset(&src, 0, y));
-        barrier(CLK_LOCAL_MEM_FENCE);
-
-        // Perform parallel reduction
-        for(unsigned int i = lsize >> 1; i > 0; i >>= 1)
-        {
-            if(lid < i)
-            {
 #if defined(PROD)
-                local_results[lid] *= local_results[lid + i];
-#else  // !defined(PROD)
-                local_results[lid] += local_results[lid + i];
+    DATA_TYPE res = (DATA_TYPE)1;
+#else  // defined(PROD)
+    DATA_TYPE res = (DATA_TYPE)0;
 #endif // defined(PROD)
-            }
-            barrier(CLK_LOCAL_MEM_FENCE);
-        }
 
-        if(lid == 0)
-        {
-#if defined(MEAN) && defined(WIDTH)
-            if(y == get_local_size(1) - 1)
-            {
-                local_results[0] /= WIDTH;
-            }
-#endif // defined(MEAN) && defined(WIDTH)
-            ((__global DATA_TYPE *)offset(&partial_res, get_group_id(0), y))[0] = local_results[0];
-        }
+    int x = 0;
+
+    for(; x <= (WIDTH - VEC_SIZE); x += VEC_SIZE)
+    {
+        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+        vals = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_addr + x * sizeof(DATA_TYPE)));
+        res  = OPERATION(res, vals, VEC_SIZE);
     }
+
+#if(WIDTH % VEC_SIZE)
+    _Pragma("unroll") for(; x < WIDTH; ++x)
+    {
+        DATA_TYPE val = *((__global DATA_TYPE *)(input_addr + x * sizeof(DATA_TYPE)));
+        res           = OPERATION(res, val, 1);
+    }
+#endif // (WIDTH % VEC_SIZE)
+
+#if defined(MEAN)
+    res /= WIDTH;
+#endif // defined(MEAN)
+    *((__global DATA_TYPE *)output_addr) = res;
 }
 #endif // defined(OPERATION)
-
-#if defined(WIDTH)
 /** This kernel performs reduction on x-axis. (Non parallel)
  *
  * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
@@ -167,23 +110,23 @@
  * @note The product flag must be passed at compile time using -DPROD if we want to compute the product, otherwise sum will be used
  * @note In case of MIN and MAX the condition data type must be passed at compile time using -DCOND_DATA_TYPE e.g. -DCOND_DATA_TYPE=short
  *
- * @param[in] src_ptr                              Pointer to the source tensor. Supported data types: S32/F16/F32 and QASYMM8/QASYMM8_SIGNED for operation MEAN
- * @param[in] src_stride_x                         Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x                           src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes    The offset of the first element in the source tensor
- * @param[in] output_ptr                           The local buffer to hold sumed values. Supported data types: same as @p src_ptr
+ * @param[in] input_ptr                            Pointer to the source tensor. Supported data types: S32/F16/F32 and QASYMM8/QASYMM8_SIGNED for operation MEAN
+ * @param[in] input_stride_x                       Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x                         input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes  The offset of the first element in the source tensor
+ * @param[in] output_ptr                           The local buffer to hold summed values. Supported data types: same as @p input_ptr
  * @param[in] output_stride_x                      Stride of the output tensor in X dimension (in bytes)
  * @param[in] output_step_x                        output_stride_x * number of elements along X processed per workitem(in bytes)
  * @param[in] output_offset_first_element_in_bytes The offset of the first element in the source tensor
  */
 __kernel void reduction_operation_non_parallel_x(
-    VECTOR_DECLARATION(src),
+    VECTOR_DECLARATION(input),
     VECTOR_DECLARATION(output))
 {
-    Vector src    = CONVERT_TO_VECTOR_STRUCT(src);
+    Vector input  = CONVERT_TO_VECTOR_STRUCT(input);
     Vector output = CONVERT_TO_VECTOR_STRUCT(output);
 
-    DATA_TYPE_PROMOTED res = CONVERT(*((__global DATA_TYPE *)vector_offset(&src, 0)), DATA_TYPE_PROMOTED);
+    DATA_TYPE_PROMOTED res = CONVERT(*((__global DATA_TYPE *)vector_offset(&input, 0)), DATA_TYPE_PROMOTED);
 
     // Convert input into F32 in order to perform quantized multiplication
 #if defined(PROD) && defined(OFFSET) && defined(SCALE)
@@ -192,7 +135,7 @@
 
     for(unsigned int x = 1; x < WIDTH; ++x)
     {
-        DATA_TYPE_PROMOTED in = CONVERT(*((__global DATA_TYPE *)vector_offset(&src, x)), DATA_TYPE_PROMOTED);
+        DATA_TYPE_PROMOTED in = CONVERT(*((__global DATA_TYPE *)vector_offset(&input, x)), DATA_TYPE_PROMOTED);
 #if defined(MIN)
         res = select(res, in, CONVERT(ISLESS(in, res), COND_DATA_TYPE));
 #elif defined(MAX)
@@ -233,13 +176,13 @@
  * @note The input data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
  * @note The height size must be passed at compile time using -DHEIGHT e.g. -DHEIGHT=128
  *
- * @param[in] src_ptr                              Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/S32/F16/F32
- * @param[in] src_stride_x                         Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x                           src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y                         Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y                           src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes    The offset of the first element in the source tensor
- * @param[in] output_ptr                           The local buffer to hold sumed values. Supported data types: same as @p src_ptr
+ * @param[in] input_ptr                            Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/S32/F16/F32
+ * @param[in] input_stride_x                       Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x                         input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y                       Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y                         input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes  The offset of the first element in the source tensor
+ * @param[in] output_ptr                           The local buffer to hold summed values. Supported data types: same as @p input_ptr
  * @param[in] output_stride_x                      Stride of the output tensor in X dimension (in bytes)
  * @param[in] output_step_x                        output_stride_x * number of elements along X processed per workitem(in bytes)
  * @param[in] output_stride_y                      Stride of the output tensor in Y dimension (in bytes)
@@ -247,18 +190,22 @@
  * @param[in] output_offset_first_element_in_bytes The offset of the first element in the source tensor
  */
 __kernel void reduction_operation_y(
-    IMAGE_DECLARATION(src),
+    IMAGE_DECLARATION(input),
     IMAGE_DECLARATION(output))
 {
-    Image src    = CONVERT_TO_IMAGE_STRUCT(src);
-    Image output = CONVERT_TO_IMAGE_STRUCT(output);
+    int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
+    int y = get_global_id(1);
 
-    VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
-    res = CONVERT(vload16(0, (__global DATA_TYPE *)offset(&src, 0, 0)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+    __global uchar *input_addr  = input_ptr + input_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + y * input_stride_y;
+    __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + y * output_stride_y;
+
+    VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE)
+    res = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input_addr), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE));
 
     // Convert input into F32 in order to perform quantized multiplication
 #if defined(PROD) && defined(OFFSET) && defined(SCALE)
-    float16 res_f = DEQUANTIZE(res, OFFSET, SCALE, DATA_TYPE_PROMOTED, 16);
+    VEC_DATA_TYPE(float, VEC_SIZE)
+    res_f = DEQUANTIZE(res, OFFSET, SCALE, DATA_TYPE_PROMOTED, VEC_SIZE);
 #endif // defined(PROD) && defined(OFFSET) && defined(SCALE)
 
 #if defined(SUM_SQUARE)
@@ -267,8 +214,8 @@
 
     for(unsigned int y = 1; y < HEIGHT; ++y)
     {
-        VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
-        in = CONVERT(vload16(0, (__global DATA_TYPE *)offset(&src, 0, y)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+        VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE)
+        in = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_addr + y * input_stride_y)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE));
 #if defined(MIN)
         res = select(res, in, ISLESS(in, res));
 #elif defined(MAX)
@@ -280,7 +227,7 @@
 #if defined(PROD)
 
 #if defined(OFFSET) && defined(SCALE)
-        res_f *= DEQUANTIZE(in, OFFSET, SCALE, DATA_TYPE_PROMOTED, 16);
+        res_f *= DEQUANTIZE(in, OFFSET, SCALE, DATA_TYPE_PROMOTED, VEC_SIZE);
 #else  // !(defined(OFFSET) && defined(SCALE))
         res *= in;
 #endif //  defined(OFFSET) && defined(SCALE)
@@ -302,11 +249,13 @@
 
     // Re-quantize
 #if defined(PROD) && defined(OFFSET) && defined(SCALE)
-    res = QUANTIZE(res_f, OFFSET, SCALE, DATA_TYPE_PROMOTED, 16);
+    res = QUANTIZE(res_f, OFFSET, SCALE, DATA_TYPE_PROMOTED, VEC_SIZE);
 #endif // defined(PROD) && defined(OFFSET) && defined(SCALE)
 
     // Store result
-    vstore16(CONVERT_SAT(res, VEC_DATA_TYPE(DATA_TYPE, 16)), 0, (__global DATA_TYPE *)output.ptr);
+    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+    res0 = CONVERT_SAT(res, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE));
+    STORE_VECTOR_SELECT(res, DATA_TYPE, output_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
 }
 #endif // defined(HEIGHT)
 
@@ -337,34 +286,30 @@
     TENSOR3D_DECLARATION(input),
     TENSOR3D_DECLARATION(output))
 {
-    Tensor3D input  = CONVERT_TO_TENSOR3D_STRUCT(input);
-    Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
+    int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
+    int y = get_global_id(1);
+    int z = get_global_id(2);
 
-    VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
-    res = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+    __global uchar *input_addr  = input_ptr + input_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + y * input_stride_y + z * input_stride_z;
+    __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + y * output_stride_y + z * output_stride_z;
+
+    VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE)
+    res = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input_addr), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE));
 
     // Convert input into F32 in order to perform quantized multiplication
 #if defined(PROD) && defined(OFFSET) && defined(SCALE)
-    float16 res_f = DEQUANTIZE(res, OFFSET, SCALE, DATA_TYPE_PROMOTED, 16);
+    VEC_DATA_TYPE(float, VEC_SIZE)
+    res_f = DEQUANTIZE(res, OFFSET, SCALE, DATA_TYPE_PROMOTED, VEC_SIZE);
 #endif // defined(PROD) && defined(OFFSET) && defined(SCALE)
 
-#if defined(COMPLEX)
-    VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
-    res1 = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 8, 0, 0)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
-#endif // defined(COMPLEX)
 #if defined(SUM_SQUARE)
     res *= res;
 #endif // defined(SUM_SQUARE)
 
     for(unsigned int z = 1; z < DEPTH; ++z)
     {
-        VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
-        in = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, z)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
-
-#if defined(COMPLEX)
-        VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
-        in1 = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 8, 0, z)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
-#endif // defined(COMPLEX)
+        VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE)
+        in = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_addr + z * input_stride_z)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE));
 
 #if defined(MIN)
         res = select(res, in, ISLESS(in, res));
@@ -377,16 +322,13 @@
 #if defined(PROD)
 
 #if defined(OFFSET) && defined(SCALE)
-        res_f *= DEQUANTIZE(in, OFFSET, SCALE, DATA_TYPE_PROMOTED, 16);
+        res_f *= DEQUANTIZE(in, OFFSET, SCALE, DATA_TYPE_PROMOTED, VEC_SIZE);
 #else  // !(defined(OFFSET) && defined(SCALE))
         res *= in;
 #endif //  defined(OFFSET) && defined(SCALE)
 
-#else // !defined(PROD)
+#else  // !defined(PROD)
         res += in;
-#if defined(COMPLEX)
-        res1 += in1;
-#endif // defined(COMPLEX)
 #endif // defined(PROD)
 #endif // defined(MAX) || defined(MIN)
     }
@@ -402,14 +344,14 @@
 
     // Re-quantize
 #if defined(PROD) && defined(OFFSET) && defined(SCALE)
-    res = QUANTIZE(res_f, OFFSET, SCALE, DATA_TYPE_PROMOTED, 16);
+    res = QUANTIZE(res_f, OFFSET, SCALE, DATA_TYPE_PROMOTED, VEC_SIZE);
 #endif // defined(PROD) && defined(OFFSET) && defined(SCALE)
 
     // Store result
-    vstore16(CONVERT_SAT(res, VEC_DATA_TYPE(DATA_TYPE, 16)), 0, (__global DATA_TYPE *)output.ptr);
-#if defined(COMPLEX)
-    vstore16(CONVERT(res1, VEC_DATA_TYPE(DATA_TYPE, 16)), 0, (__global DATA_TYPE *)tensor3D_offset(&output, 8, 0, 0));
-#endif // defined(COMPLEX)
+    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+    res0 = CONVERT_SAT(res, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE));
+
+    STORE_VECTOR_SELECT(res, DATA_TYPE, output_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
 }
 #endif /* defined(DEPTH) */
 
@@ -445,15 +387,20 @@
     TENSOR4D_DECLARATION(input),
     TENSOR4D_DECLARATION(output))
 {
-    Tensor4D input  = CONVERT_TO_TENSOR4D_STRUCT(input, DEPTH);
-    Tensor4D output = CONVERT_TO_TENSOR4D_STRUCT(output, DEPTH);
+    int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
+    int y = get_global_id(1);
+    int z = get_global_id(2);
 
-    VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
-    res = CONVERT(vload16(0, (__global DATA_TYPE *)tensor4D_offset(&input, 0, 0, 0, 0)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+    __global uchar *input_addr  = input_ptr + input_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + y * input_stride_y + (z % DEPTH) * input_stride_z + (z / DEPTH) * input_stride_w;
+    __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + y * output_stride_y + (z % DEPTH) * output_stride_z + (z / DEPTH) * output_stride_w;
+
+    VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE)
+    res = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input_addr), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE));
 
     // Convert input into F32 in order to perform quantized multiplication
 #if defined(PROD) && defined(OFFSET) && defined(SCALE)
-    float16 res_f = DEQUANTIZE(res, OFFSET, SCALE, DATA_TYPE_PROMOTED, 16);
+    VEC_DATA_TYPE(float, VEC_SIZE)
+    res_f = DEQUANTIZE(res, OFFSET, SCALE, DATA_TYPE_PROMOTED, VEC_SIZE);
 #endif // defined(PROD) && defined(OFFSET) && defined(SCALE)
 
 #if defined(SUM_SQUARE)
@@ -462,8 +409,8 @@
 
     for(unsigned int w = 1; w < BATCH; ++w)
     {
-        VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
-        in = CONVERT(vload16(0, (__global DATA_TYPE *)tensor4D_offset(&input, 0, 0, 0, w)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+        VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE)
+        in = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_addr + w * input_stride_w)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, VEC_SIZE));
 
 #if defined(MIN)
         res = select(res, in, ISLESS(in, res));
@@ -476,7 +423,7 @@
 #if defined(PROD)
 
 #if defined(OFFSET) && defined(SCALE)
-        res_f *= DEQUANTIZE(in, OFFSET, SCALE, DATA_TYPE_PROMOTED, 16);
+        res_f *= DEQUANTIZE(in, OFFSET, SCALE, DATA_TYPE_PROMOTED, VEC_SIZE);
 #else  // !(defined(OFFSET) && defined(SCALE))
         res *= in;
 #endif //  defined(OFFSET) && defined(SCALE)
@@ -498,10 +445,12 @@
 
     // Re-quantize
 #if defined(PROD) && defined(OFFSET) && defined(SCALE)
-    res = QUANTIZE(res_f, OFFSET, SCALE, DATA_TYPE_PROMOTED, 16);
+    res = QUANTIZE(res_f, OFFSET, SCALE, DATA_TYPE_PROMOTED, VEC_SIZE);
 #endif // defined(PROD) && defined(OFFSET) && defined(SCALE)
 
     // Store result
-    vstore16(CONVERT_SAT(res, VEC_DATA_TYPE(DATA_TYPE, 16)), 0, (__global DATA_TYPE *)output.ptr);
+    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+    res0 = CONVERT_SAT(res, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE));
+    STORE_VECTOR_SELECT(res, DATA_TYPE, output_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
 }
 #endif /* defined(BATCH) && defined(DEPTH) */
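
All the axis != 0 kernels share the same padding-free indexing: `x = max(gid0 * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE, 0)`, and `STORE_VECTOR_SELECT` makes only work-item 0 emit a partial store. A C++ sketch of that index math under the assumed semantics (illustrative names):

```cpp
#include <cstdio>

// Work-item 0 covers the first VEC_SIZE_LEFTOVER elements; later items
// shift left by the same amount so every other store is full width,
// which removes the need for output padding.
int start_index(int gid0, int vec_size, int leftover)
{
    const int x = gid0 * vec_size - (vec_size - leftover) % vec_size;
    return x > 0 ? x : 0;
}

int main()
{
    // e.g. width = 10, VEC_SIZE = 4 -> VEC_SIZE_LEFTOVER = 2
    for(int gid = 0; gid < 3; ++gid)
        std::printf("gid %d -> x = %d\n", gid, start_index(gid, 4, 2));
    // 0, 2, 6: stores cover [0,2) (partial), [2,6) and [6,10)
}
```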
diff --git a/src/core/CL/kernels/CLReductionOperationKernel.cpp b/src/core/CL/kernels/CLReductionOperationKernel.cpp
index a5734d4..5c80f33 100644
--- a/src/core/CL/kernels/CLReductionOperationKernel.cpp
+++ b/src/core/CL/kernels/CLReductionOperationKernel.cpp
@@ -42,10 +42,7 @@
 {
 namespace
 {
-// OpenCL kernel requires input width to be a power of 2 for x-axis.
-constexpr unsigned int border_val = 64;
-
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op, unsigned int width)
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
     ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
@@ -56,11 +53,13 @@
     else
     {
         ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 2, DataType::F16, DataType::F32);
+        ARM_COMPUTE_RETURN_ERROR_ON(axis == 0);
     }
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(op == ReductionOperation::SUM_SQUARE && input->data_type() == DataType::QASYMM8, "Not supported reduction operation for QASYMM8");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis");
-    ARM_COMPUTE_RETURN_ERROR_ON((op == ReductionOperation::MEAN_SUM) && (axis == 0) && (width == 0) && (input->data_type() != DataType::QASYMM8) && (input->data_type() != DataType::QASYMM8_SIGNED));
+    ARM_COMPUTE_RETURN_ERROR_ON((op == ReductionOperation::MEAN_SUM) && (axis == 0) && (input->dimension(0) == 0) && (input->data_type() != DataType::QASYMM8)
+                                && (input->data_type() != DataType::QASYMM8_SIGNED));
     ARM_COMPUTE_RETURN_ERROR_ON_MSG((op == ReductionOperation::ARG_IDX_MAX) || (op == ReductionOperation::ARG_IDX_MIN), "Not supported reduction operation, use CLArgMinMaxLayer");
 
     if(output->total_size() != 0)
@@ -71,77 +70,34 @@
 
     return Status{};
 }
-
-std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, unsigned int axis, ReductionOperation op)
-{
-    // Output tensor auto initialization if not yet initialized
-    const TensorShape output_shape     = arm_compute::misc::shape_calculator::compute_reduced_shape(input->tensor_shape(), axis, true);
-    DataType          output_data_type = input->data_type();
-    auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape).set_data_type(output_data_type).reset_padding().set_is_resizable(true));
-
-    const unsigned int num_elems_processed_per_iteration = (is_data_type_quantized(input->data_type()) && (axis == 0)) ? 1 : 16;
-    Window             win                               = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
-    bool               window_changed                    = false;
-    const bool         is_serial_op                      = needs_serialized_reduction(op, input->data_type(), axis);
-
-    switch(axis)
-    {
-        case 0:
-        {
-            if(!is_serial_op)
-            {
-                const unsigned int     border_width = ((input->dimension(0) % border_val) != 0) ? border_val - input->dimension(0) % border_val : 0;
-                AccessWindowStatic     input_access(input, 0, 0, input->dimension(0) + border_width, 1);
-                AccessWindowHorizontal output_access(output, 0, 1);
-                window_changed = update_window_and_padding(win, input_access, output_access);
-            }
-        }
-        break;
-        case 1:
-        case 2:
-        case 3:
-        {
-            AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
-            AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
-            window_changed = update_window_and_padding(win, input_access, output_access);
-        }
-        break;
-        default:
-            ARM_COMPUTE_ERROR("Not supported");
-    }
-
-    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
-
-    return std::make_tuple(err, win);
-}
 } // namespace
 
 CLReductionOperationKernel::CLReductionOperationKernel()
-    : _input(nullptr), _output(nullptr), _reduction_axis(0), _op(ReductionOperation::SUM_SQUARE), _border_size()
+    : _input(nullptr), _output(nullptr), _reduction_axis(0), _op(ReductionOperation::SUM_SQUARE)
 {
 }
 
-BorderSize CLReductionOperationKernel::border_size() const
+void CLReductionOperationKernel::configure(const ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op)
 {
-    return _border_size;
+    configure(CLKernelLibrary::get().get_compile_context(), input, output, axis, op);
 }
 
-void CLReductionOperationKernel::configure(const ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, unsigned int width)
-{
-    configure(CLKernelLibrary::get().get_compile_context(), input, output, axis, op, width);
-}
-
-void CLReductionOperationKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, unsigned int width)
+void CLReductionOperationKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
 
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), axis, op, width));
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), axis, op));
+
+    auto padding_info = get_padding_info({ input, output });
 
     _input          = input;
     _output         = output;
     _reduction_axis = axis;
     _op             = op;
 
+    const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_reduced_shape(input->info()->tensor_shape(), axis, true);
+    auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape).reset_padding().set_is_resizable(true));
+
     // Set build options
     CLBuildOptions build_opts;
     DataType       data_type = input->info()->data_type();
@@ -156,8 +112,15 @@
         data_type_promoted = get_cl_type_from_data_type(data_type);
     }
 
+    const unsigned int width             = input->info()->dimension(0) * input->info()->num_channels();
+    unsigned int       vec_size          = (is_data_type_quantized(input->info()->data_type()) && (axis == 0)) ? 1 : 16;
+    vec_size                             = adjust_vec_size(vec_size, width);
+    const unsigned int vec_size_leftover = width % vec_size;
+
     build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
     build_opts.add_option("-DDATA_TYPE_PROMOTED=" + data_type_promoted);
+    build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(vec_size));
+    build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(vec_size_leftover));
     build_opts.add_option_if(is_data_type_float(data_type), "-DFLOAT_DATA_TYPE");
     build_opts.add_option_if(op == ReductionOperation::SUM_SQUARE, "-DSUM_SQUARE");
     build_opts.add_option_if(op == ReductionOperation::MEAN_SUM, "-DMEAN");
@@ -165,7 +128,6 @@
     build_opts.add_option_if(op == ReductionOperation::PROD, "-DPROD");
     build_opts.add_option_if(op == ReductionOperation::MIN, "-DMIN");
     build_opts.add_option_if(op == ReductionOperation::MAX, "-DMAX");
-    build_opts.add_option_if(input->info()->num_channels() == 2, "-DCOMPLEX");
     build_opts.add_option_if(is_data_type_quantized(data_type), "-DOFFSET=" + support::cpp11::to_string(input->info()->quantization_info().uniform().offset));
     build_opts.add_option_if(is_data_type_quantized(data_type), "-DSCALE=" + float_to_string_with_full_precision(input->info()->quantization_info().uniform().scale));
 
@@ -189,7 +151,6 @@
     }
 
     // Create kernel
-    cl::NDRange lws_hint = CLKernelLibrary::get().default_ndrange();
     std::string kernel_axis_name;
     const bool  is_serial_op = needs_serialized_reduction(_op, _input->info()->data_type(), _reduction_axis);
 
@@ -197,22 +158,14 @@
     {
         case 0:
         {
+            build_opts.add_option("-DWIDTH=" + support::cpp11::to_string(width));
+            kernel_axis_name = "x";
+
             if(is_serial_op)
             {
-                build_opts.add_option("-DWIDTH=" + support::cpp11::to_string(input->info()->dimension(0)));
                 build_opts.add_option_if_else(_input->info()->data_type() == DataType::F16, "-DCOND_DATA_TYPE=short", "-DCOND_DATA_TYPE=int");
                 kernel_axis_name = "non_parallel_x";
             }
-            else
-            {
-                build_opts.add_option_if(op == ReductionOperation::MEAN_SUM, "-DWIDTH=" + support::cpp11::to_string(width));
-                const unsigned int width_leftover = input->info()->dimension(0) % border_val;
-                const unsigned int border_width   = (width_leftover != 0) ? border_val - width_leftover : 0;
-                kernel_axis_name                  = "x";
-
-                lws_hint     = create_lws_hint_parallel_implementations(input->info()->dimension(0), border_val);
-                _border_size = BorderSize(0, border_width, 0, 0);
-            }
         }
         break;
         case 1:
@@ -234,18 +187,16 @@
     _kernel = create_kernel(compile_context, "reduction_operation_" + kernel_axis_name, build_opts.options());
 
     // Configure kernel window
-    auto win_config = validate_and_configure_window(_input->info(), _output->info(), axis, op);
+    Window win = calculate_max_window(*input->info(), Steps(vec_size));
+    win.set(Window::DimX, Window::Dimension(win.x().start(), win.x().end() * _input->info()->num_channels(), win.x().step()));
+    ICLKernel::configure_internal(win);
 
-    ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
-
-    ICLKernel::configure_internal(std::get<1>(win_config), lws_hint);
+    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
 }
 
-Status CLReductionOperationKernel::validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op, unsigned int width)
+Status CLReductionOperationKernel::validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op)
 {
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, axis, op, width));
-    ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), output->clone().get(), axis, op)));
-
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, axis, op));
     return Status{};
 }
 
@@ -277,36 +228,24 @@
                     unsigned int idx = 0;
                     add_1D_tensor_argument(idx, _input, in_slice);
                     add_1D_tensor_argument(idx, _output, out_slice);
-                    enqueue(queue, *this, in_slice, lws_hint());
+                    enqueue(queue, *this, in_slice);
                 }
                 while(window_in.slide_window_slice_1D(in_slice) && out_window.slide_window_slice_1D(out_slice));
             }
             else
             {
                 // Set out window
-                Window out_window(window);
-                out_window.set(Window::DimX, Window::Dimension(0, 0, 0));
+                bool   has_collapsed = true;
+                Window window_in     = window.collapse_if_possible(window, 1, &has_collapsed);
+                ARM_COMPUTE_ERROR_ON(!has_collapsed);
 
-                // Get first input and output slices
-                Window in_slice  = window.first_slice_window_2D();
-                Window out_slice = out_window.first_slice_window_2D();
+                Window window_out = window_in;
+                window_out.set(0, Window::Dimension());
 
-                // Reshape window
-                const unsigned int border_width = ((in_slice.x().end() % border_val) != 0) ? border_val - in_slice.x().end() % border_val : 0;
-                in_slice.set(Window::DimX, Window::Dimension(in_slice.x().start(), in_slice.x().end() + border_width, in_slice.x().step()));
-
-                // Set local sums buffer
-                unsigned int local_res_size = lws_hint()[0] * _input->info()->element_size();
-                _kernel.setArg(num_arguments_per_2D_tensor() * 2, local_res_size, nullptr);
-
-                do
-                {
-                    unsigned int idx = 0;
-                    add_2D_tensor_argument(idx, _input, in_slice);
-                    add_2D_tensor_argument(idx, _output, out_slice);
-                    enqueue(queue, *this, in_slice, lws_hint());
-                }
-                while(window.slide_window_slice_2D(in_slice) && window.slide_window_slice_2D(out_slice));
+                unsigned int idx = 0;
+                add_2D_tensor_argument(idx, _input, window_in);
+                add_2D_tensor_argument(idx, _output, window_out);
+                enqueue(queue, *this, window_in);
             }
         }
         break;
@@ -323,7 +262,7 @@
                 unsigned int idx = 0;
                 add_2D_tensor_argument(idx, _input, in_slice);
                 add_2D_tensor_argument(idx, _output, out_slice);
-                enqueue(queue, *this, in_slice, lws_hint());
+                enqueue(queue, *this, in_slice);
             }
             while(window_in.slide_window_slice_2D(in_slice) && window.slide_window_slice_2D(out_slice));
         }
@@ -341,7 +280,7 @@
                 unsigned int idx = 0;
                 add_3D_tensor_argument(idx, _input, in_slice);
                 add_3D_tensor_argument(idx, _output, out_slice);
-                enqueue(queue, *this, in_slice, lws_hint());
+                enqueue(queue, *this, in_slice);
             }
             while(window_in.slide_window_slice_3D(in_slice) && window.slide_window_slice_3D(out_slice));
         }
@@ -359,7 +298,7 @@
                 unsigned int idx = 0;
                 add_4D_tensor_argument(idx, _input, in_slice);
                 add_4D_tensor_argument(idx, _output, out_slice);
-                enqueue(queue, *this, in_slice, lws_hint());
+                enqueue(queue, *this, in_slice);
             }
             while(window_in.slide_window_slice_4D(in_slice) && window.slide_window_slice_4D(out_slice));
         }
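
On the host side, `VEC_SIZE` is derived from the tensor via `adjust_vec_size` and the remainder becomes `VEC_SIZE_LEFTOVER`. A sketch of the assumed behaviour (the real helper lives in the library's utilities; this is not its exact code):

```cpp
#include <cstdio>

// Assumed behaviour of adjust_vec_size: halve the preferred vector width
// until it fits the row, so narrow tensors still get valid vector loads.
unsigned int adjust_vec_size(unsigned int vec_size, unsigned int width)
{
    while(vec_size > width && vec_size > 1)
        vec_size >>= 1; // 16 -> 8 -> 4 -> 2 -> 1
    return vec_size;
}

int main()
{
    const unsigned int width    = 20;
    const unsigned int vec_size = adjust_vec_size(16, width);
    std::printf("-DVEC_SIZE=%u -DVEC_SIZE_LEFTOVER=%u\n", vec_size, width % vec_size);
    // prints: -DVEC_SIZE=16 -DVEC_SIZE_LEFTOVER=4
}
```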
diff --git a/src/core/CL/kernels/CLReductionOperationKernel.h b/src/core/CL/kernels/CLReductionOperationKernel.h
index ff9fd61..b456378 100644
--- a/src/core/CL/kernels/CLReductionOperationKernel.h
+++ b/src/core/CL/kernels/CLReductionOperationKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -56,9 +56,8 @@
      *                    Output will have the same number of dimensions as input.
      * @param[in]  axis   Axis along which to reduce. Supported reduction axis : 0,1,2,3
      * @param[in]  op     Reduction operation to perform. Operations supported: MEAN_SUM, PROD, SUM_SQUARE, SUM, MIN, MAX
-     * @param[in]  width  (Optional)  In case of x-axis we also need to provide the width of the input image.
      */
-    void configure(const ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, unsigned int width = 0);
+    void configure(const ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op);
     /** Set the input and output tensors.
      *
      * @param[in]  compile_context The compile context to be used.
@@ -67,9 +66,8 @@
      *                             Output will have the same number of dimensions as input.
      * @param[in]  axis            Axis along which to reduce. Supported reduction axis : 0,1,2,3
      * @param[in]  op              Reduction operation to perform. Operations supported: MEAN_SUM, PROD, SUM_SQUARE, SUM, MIN, MAX
-     * @param[in]  width           (Optional)  In case of x-axis we also need to provide the width of the input image.
      */
-    void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, unsigned int width = 0);
+    void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op);
 
     /** Static function to check if given info will lead to a valid configuration of @ref CLReductionOperationKernel.
      *
@@ -78,22 +76,19 @@
      *                   Output will have the same number of dimensions as input.
      * @param[in] axis   Axis along which to reduce. Supported reduction axis : 0,1,2,3
      * @param[in] op     Reduction operation to perform. Operations supported: MEAN_SUM, PROD, SUM_SQUARE, SUM, MIN, MAX
-     * @param[in] width  (Optional)  In case of x-axis we also need to provide the width of the input image.
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op, unsigned int width = 0);
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op);
 
     // Inherited methods overridden:
     void run(const Window &window, cl::CommandQueue &queue) override;
-    BorderSize border_size() const override;
 
 private:
     const ICLTensor   *_input;
     ICLTensor         *_output;
     unsigned int       _reduction_axis;
     ReductionOperation _op;
-    BorderSize         _border_size;
 };
 } // namespace arm_compute
 #endif /*ARM_COMPUTE_CLREDUCTIONOPERATIONKERNEL_H */
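
For callers, the change is source-breaking only where the optional `width` argument or `border_size()` was used explicitly; the kernel now derives the width from the input's `TensorInfo`. A hypothetical call site after the change:

```cpp
#include "src/core/CL/kernels/CLReductionOperationKernel.h"

using namespace arm_compute;

// Hypothetical usage: the width argument is gone and no border handling
// (border_size() / CLFillBorderKernel) is required any more.
void configure_mean_over_x(const ICLTensor *input, ICLTensor *output)
{
    CLReductionOperationKernel kernel;
    kernel.configure(input, output, /*axis=*/0, ReductionOperation::MEAN_SUM);
}
```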
diff --git a/src/runtime/CL/functions/CLReductionOperation.cpp b/src/runtime/CL/functions/CLReductionOperation.cpp
index f40d945..61859f8 100644
--- a/src/runtime/CL/functions/CLReductionOperation.cpp
+++ b/src/runtime/CL/functions/CLReductionOperation.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -30,7 +30,6 @@
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLFillBorderKernel.h"
 #include "src/core/CL/kernels/CLReductionOperationKernel.h"
 #include "src/core/helpers/AutoConfiguration.h"
 #include "src/runtime/Utils.h"
@@ -38,8 +37,7 @@
 namespace arm_compute
 {
 CLReductionOperation::CLReductionOperation(std::shared_ptr<IMemoryManager> memory_manager)
-    : _memory_group(std::move(memory_manager)), _results_vector(), _reduction_kernels_vector(), _border_handlers_vector(), _reshape(), _num_of_stages(), _reduction_axis(), _is_serial(),
-      _is_reshape_required(false)
+    : _memory_group(std::move(memory_manager)), _unreshaped_output(), _reduction_kernel(), _reshape(), _reduction_axis(), _is_reshape_required(false)
 {
 }
 
@@ -51,9 +49,7 @@
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis");
 
-    const unsigned int num_of_stages       = utils::calculate_number_of_stages_only_x_axis(input->dimension(0), axis);
-    const bool         is_serial           = needs_serialized_reduction(op, input->data_type(), axis);
-    const bool         is_reshape_required = !keep_dims;
+    const bool is_reshape_required = !keep_dims;
 
     if(is_reshape_required && output->total_size() != 0)
     {
@@ -65,7 +61,6 @@
 
     TensorInfo output_before_reshape;
     const auto input_shape        = input->tensor_shape();
-    const auto input_data_type    = input->data_type();
     const auto input_num_channles = input->num_channels();
     const auto input_qinfo        = input->quantization_info();
     const auto output_data_type   = output->data_type();
@@ -83,73 +78,7 @@
         output_internal = &output_before_reshape;
     }
 
-    if(is_serial)
-    {
-        ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(input, output_internal, axis, op));
-    }
-    else
-    {
-        // Create temporary tensor infos
-        std::vector<TensorInfo> sums_vector(num_of_stages - 1);
-
-        // Create intermediate tensor info
-        TensorShape shape{ input_shape };
-
-        shape.set(0, ceil(shape.x() / 128.f));
-
-        for(unsigned int i = 0; i < num_of_stages - 1; i++)
-        {
-            initialize_tensorinfo(sums_vector[i], shape, input_data_type, input_num_channles, input_qinfo);
-        }
-
-        ReductionOperation first_kernel_op;
-        ReductionOperation intermediate_kernel_op;
-        ReductionOperation last_kernel_op;
-        switch(op)
-        {
-            case ReductionOperation::SUM:
-            case ReductionOperation::MEAN_SUM:
-                first_kernel_op        = ReductionOperation::SUM;
-                intermediate_kernel_op = ReductionOperation::SUM;
-                last_kernel_op         = op;
-                break;
-            case ReductionOperation::SUM_SQUARE:
-                first_kernel_op        = ReductionOperation::SUM_SQUARE;
-                intermediate_kernel_op = ReductionOperation::SUM;
-                last_kernel_op         = ReductionOperation::SUM;
-                break;
-            case ReductionOperation::PROD:
-                first_kernel_op        = ReductionOperation::PROD;
-                intermediate_kernel_op = ReductionOperation::PROD;
-                last_kernel_op         = ReductionOperation::PROD;
-                break;
-            case ReductionOperation::MIN:
-                first_kernel_op        = ReductionOperation::MIN;
-                intermediate_kernel_op = ReductionOperation::MIN;
-                last_kernel_op         = ReductionOperation::MIN;
-                break;
-            case ReductionOperation::MAX:
-                first_kernel_op        = ReductionOperation::MAX;
-                intermediate_kernel_op = ReductionOperation::MAX;
-                last_kernel_op         = ReductionOperation::MAX;
-                break;
-            default:
-                ARM_COMPUTE_ERROR("Not supported");
-        }
-
-        // Validate ReductionOperation only on first kernel
-        ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(input, &sums_vector[0], axis, first_kernel_op));
-
-        // Validate ReductionOperation on intermediate stages
-        for(unsigned int i = 1; i < num_of_stages - 1; ++i)
-        {
-            ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(&sums_vector[i - 1], &sums_vector[i], axis, intermediate_kernel_op));
-        }
-
-        // Validate ReductionOperation on the last stage
-        const unsigned int last_stage = num_of_stages - 1;
-        ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(&sums_vector[last_stage - 1], output_internal, axis, last_kernel_op, input->dimension(0)));
-    }
+    ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(input, output_internal, axis, op));
 
     if(is_reshape_required)
     {
@@ -161,33 +90,15 @@
 
 ICLTensor *CLReductionOperation::configure_intermediate_result_vector(ICLTensor *input, ICLTensor *output)
 {
-    if(!_is_reshape_required && _is_serial)
+    if(!_is_reshape_required)
     {
         return output;
     }
 
-    auto intermediate_result_vector_size = _is_serial ? 1 : _num_of_stages;
-
-    if(!_is_reshape_required)
-    {
-        --intermediate_result_vector_size;
-    }
-
-    _results_vector.resize(intermediate_result_vector_size);
     auto shape = input->info()->tensor_shape();
-
-    shape.set(_reduction_axis, _is_serial ? 1 : ceil(shape.x() / 128.f));
-
-    for(auto &v : _results_vector)
-    {
-        if(&v == &_results_vector.back() && _is_reshape_required)
-        {
-            shape.set(_reduction_axis, 1);
-        }
-        v.allocator()->init(input->info()->clone()->set_tensor_shape(shape));
-    }
-
-    return _is_reshape_required ? &_results_vector.back() : output;
+    shape.set(_reduction_axis, 1);
+    _unreshaped_output.allocator()->init(input->info()->clone()->set_tensor_shape(shape));
+    return &_unreshaped_output;
 }
 
 void CLReductionOperation::configure(ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, bool keep_dims)
@@ -198,9 +109,7 @@
 void CLReductionOperation::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, bool keep_dims)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-    _num_of_stages       = utils::calculate_number_of_stages_only_x_axis(input->info()->dimension(0), axis);
     _reduction_axis      = axis;
-    _is_serial           = needs_serialized_reduction(op, input->info()->data_type(), axis);
     _is_reshape_required = !keep_dims;
 
     auto *output_internal = configure_intermediate_result_vector(input, output);
@@ -210,110 +119,17 @@
         const TensorShape output_shape     = arm_compute::misc::shape_calculator::compute_reduced_shape(input->info()->tensor_shape(), axis, false);
         const auto        output_data_type = input->info()->data_type();
         auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape).set_data_type(output_data_type).reset_padding().set_is_resizable(true));
+
+        _memory_group.manage(&_unreshaped_output);
     }
 
-    // Configure reduction operation kernels
-    _reduction_kernels_vector.reserve(_num_of_stages);
-
-    // Create temporary tensors
-    if(_is_serial)
-    {
-        if(_is_reshape_required)
-        {
-            _memory_group.manage(&_results_vector.back());
-        }
-
-        _reduction_kernels_vector.emplace_back(std::make_unique<CLReductionOperationKernel>());
-        _reduction_kernels_vector[0]->configure(compile_context, input, output_internal, axis, op, 0);
-    }
-    else
-    {
-        _border_handlers_vector.reserve(_num_of_stages);
-        _memory_group.manage(&_results_vector[0]);
-
-        ReductionOperation first_kernel_op;
-        ReductionOperation intermediate_kernel_op;
-        ReductionOperation last_kernel_op;
-        PixelValue         pixelValue;
-        switch(op)
-        {
-            case ReductionOperation::SUM:
-            case ReductionOperation::MEAN_SUM:
-                first_kernel_op        = ReductionOperation::SUM;
-                intermediate_kernel_op = ReductionOperation::SUM;
-                last_kernel_op         = op;
-                pixelValue             = PixelValue();
-                break;
-            case ReductionOperation::SUM_SQUARE:
-                first_kernel_op        = ReductionOperation::SUM_SQUARE;
-                intermediate_kernel_op = ReductionOperation::SUM;
-                last_kernel_op         = ReductionOperation::SUM;
-                pixelValue             = PixelValue();
-                break;
-            case ReductionOperation::PROD:
-                first_kernel_op        = ReductionOperation::PROD;
-                intermediate_kernel_op = ReductionOperation::PROD;
-                last_kernel_op         = ReductionOperation::PROD;
-                pixelValue             = PixelValue(1, input->info()->data_type());
-                break;
-            case ReductionOperation::MIN:
-                first_kernel_op        = ReductionOperation::MIN;
-                intermediate_kernel_op = ReductionOperation::MIN;
-                last_kernel_op         = ReductionOperation::MIN;
-                pixelValue             = std::get<1>(get_min_max(input->info()->data_type()));
-                break;
-            case ReductionOperation::MAX:
-                first_kernel_op        = ReductionOperation::MAX;
-                intermediate_kernel_op = ReductionOperation::MAX;
-                last_kernel_op         = ReductionOperation::MAX;
-                pixelValue             = std::get<0>(get_min_max(input->info()->data_type()));
-                break;
-            default:
-                ARM_COMPUTE_ERROR("Not supported");
-        }
-
-        _reduction_kernels_vector.emplace_back(std::make_unique<CLReductionOperationKernel>());
-        _reduction_kernels_vector[0]->configure(compile_context, input, &_results_vector[0], axis, first_kernel_op);
-
-        _border_handlers_vector.emplace_back(std::make_unique<CLFillBorderKernel>());
-        _border_handlers_vector[0]->configure(compile_context, input, _reduction_kernels_vector[0]->border_size(), BorderMode::CONSTANT, pixelValue);
-
-        // Apply ReductionOperation on intermediate stages
-        for(unsigned int i = 1; i < _num_of_stages - 1; ++i)
-        {
-            _memory_group.manage(&_results_vector[i]);
-
-            _reduction_kernels_vector.emplace_back(std::make_unique<CLReductionOperationKernel>());
-            _reduction_kernels_vector[i]->configure(compile_context, &_results_vector[i - 1], &_results_vector[i], axis, intermediate_kernel_op);
-
-            _border_handlers_vector.emplace_back(std::make_unique<CLFillBorderKernel>());
-            _border_handlers_vector[i]->configure(compile_context, &_results_vector[i - 1], _reduction_kernels_vector[i]->border_size(), BorderMode::CONSTANT, pixelValue);
-
-            _results_vector[i - 1].allocator()->allocate();
-        }
-
-        // Apply ReductionOperation on the last stage
-        const unsigned int last_stage  = _num_of_stages - 1;
-        const unsigned int input_width = input->info()->dimension(0);
-
-        if(_is_reshape_required)
-        {
-            _memory_group.manage(&_results_vector.back());
-        }
-
-        _reduction_kernels_vector.emplace_back(std::make_unique<CLReductionOperationKernel>());
-        _reduction_kernels_vector[last_stage]->configure(compile_context, &_results_vector[last_stage - 1], output_internal, axis, last_kernel_op, input_width);
-
-        _border_handlers_vector.emplace_back(std::make_unique<CLFillBorderKernel>());
-        _border_handlers_vector[last_stage]->configure(compile_context, &_results_vector[last_stage - 1], _reduction_kernels_vector[last_stage]->border_size(), BorderMode::CONSTANT, pixelValue);
-
-        _results_vector[last_stage - 1].allocator()->allocate();
-    }
+    _reduction_kernel = std::make_unique<CLReductionOperationKernel>();
+    _reduction_kernel->configure(compile_context, input, output_internal, axis, op);
 
     if(_is_reshape_required)
     {
-        _reshape.configure(compile_context, &_results_vector.back(), output);
-        _results_vector.back().allocator()->allocate();
+        _reshape.configure(compile_context, &_unreshaped_output, output);
+        _unreshaped_output.allocator()->allocate();
     }
 }
 
@@ -321,18 +137,7 @@
 {
     MemoryGroupResourceScope scope_mg(_memory_group);
 
-    if(_is_serial)
-    {
-        CLScheduler::get().enqueue(*_reduction_kernels_vector[0], false);
-    }
-    else
-    {
-        for(unsigned int i = 0; i < _num_of_stages; ++i)
-        {
-            CLScheduler::get().enqueue(*_border_handlers_vector[i], false);
-            CLScheduler::get().enqueue(*_reduction_kernels_vector[i], false);
-        }
-    }
+    CLScheduler::get().enqueue(*_reduction_kernel, false);
 
     if(_is_reshape_required)
     {