COMPMID-1816: Use parallel reduction on axis 0 in CL ARG_MIN/ARG_MAX

Introduce the new CLArgMinMaxLayerKernel

Change-Id: I0b8254207cc3859d19ceef9b6429cf5c1c586db0
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2202
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
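
The staging scheme, in brief: for axis 0 the new arg_min_max_x kernel pads each
row up to a multiple of 16 elements, each work-item reduces one 16-wide vector,
and the per-work-item winners are combined with a tree reduction in local
memory. When one pass is not enough, the partial indices are fed back through
the PREV_OUTPUT variant until a single index per row remains. A minimal
host-side model of this staging, assuming the 8-work-items-per-group sizing
from create_lws_hint_parallel_implementations (sketch only, not library API):

    #include <cstdio>

    // One stage shrinks a row of `width` candidates to one index per work-group:
    // each work-item covers a 16-wide block and a group holds at most 8 work-items.
    unsigned int stage_output_width(unsigned int width)
    {
        const unsigned int padded     = ((width + 15) / 16) * 16; // pad to the vector size
        const unsigned int work_items = padded / 16;              // one block per work-item
        return (work_items + 7) / 8;                              // one result per work-group
    }

    int main()
    {
        unsigned int width = 4096;
        while(width > 1)
        {
            width = stage_output_width(width); // 4096 -> 32 -> 1
            std::printf("partial results left: %u\n", width);
        }
        return 0;
    }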
diff --git a/src/core/CL/CLHelpers.cpp b/src/core/CL/CLHelpers.cpp
index 28b1a32..9754beb 100644
--- a/src/core/CL/CLHelpers.cpp
+++ b/src/core/CL/CLHelpers.cpp
@@ -365,4 +365,12 @@
         return static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
     }
 }
+
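+/** Create a local workgroup size hint for the parallel reduction implementations
+ *
+ * Each work-item reduces one vector of @p vector_size elements and up to 8
+ * work-items are grouped together; if the padded input covers fewer than 8
+ * vectors, fewer work-items are used.
+ * e.g. input_dimension = 200, vector_size = 16: padded width = 208, 13 work-items, hint = 8
+ *
+ * @param[in] input_dimension Size of the dimension to be reduced
+ * @param[in] vector_size     Number of elements processed per work-item
+ *
+ * @return the local workgroup size hint as a cl::NDRange
+ */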
+cl::NDRange create_lws_hint_parallel_implementations(unsigned int input_dimension, unsigned int vector_size)
+{
+    const unsigned int width_leftover = input_dimension % vector_size;
+    const unsigned int border_width   = (width_leftover != 0) ? vector_size - width_leftover : 0;
+    const unsigned int num_of_threads = ((input_dimension + border_width) / vector_size);
+    return cl::NDRange(std::min(8U, num_of_threads));
+}
 } // namespace arm_compute
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 5d52054..5b59094 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -150,6 +150,10 @@
     { "activation_layer", "activation_layer.cl" },
     { "activation_layer_quant", "activation_layer_quant.cl" },
     { "activation_layer_quant_f32", "activation_layer_quant.cl" },
+    { "arg_min_max_x", "arg_min_max.cl" },
+    { "arg_min_max_y", "arg_min_max.cl" },
+    { "arg_min_max_z", "arg_min_max.cl" },
+    { "arg_min_max_w", "arg_min_max.cl" },
     { "batch_to_space_nchw", "batch_to_space.cl" },
     { "batch_to_space_static_nchw", "batch_to_space.cl" },
     { "batch_to_space_nhwc", "batch_to_space.cl" },
@@ -585,6 +589,10 @@
 #include "./cl_kernels/activation_layer_quant.clembed"
     },
     {
+        "arg_min_max.cl",
+#include "./cl_kernels/arg_min_max.clembed"
+    },
+    {
         "batch_to_space.cl",
 #include "./cl_kernels/batch_to_space.clembed"
     },
diff --git a/src/core/CL/cl_kernels/arg_min_max.cl b/src/core/CL/cl_kernels/arg_min_max.cl
new file mode 100644
index 0000000..3f75377
--- /dev/null
+++ b/src/core/CL/cl_kernels/arg_min_max.cl
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(ARG_MAX)
+#define CONDITION_TO_USE(x, y) ISGREATER(x, y)
+#elif defined(ARG_MIN)
+#define CONDITION_TO_USE(x, y) ISLESS(x, y)
+#else // !(defined(ARG_MAX) || defined(ARG_MIN))
+#error "Unsupported reduction operation!"
+#endif // defined(ARG_MAX)
+
+#if defined(DATA_TYPE_OUTPUT)
+#if defined(WIDTH)
+#if defined(ARG_MIN)
+#if defined(PREV_OUTPUT)
+/** Find the index of the minimum value in a vector, resolving the partial results of previous stages
+ *
+ * @param[in] input    Pointer to the first value of the row being reduced.
+ * @param[in] prev_res Pointer to the partial result indices produced by the previous stage.
+ * @param[in] x_idx    Index of the 16-wide block processed by this work-item.
+ *
+ * @return index of the minimum value.
+ */
+inline DATA_TYPE_OUTPUT arg_idx_min_prev_out(__global const DATA_TYPE *input, __global const DATA_TYPE_OUTPUT *prev_res, const int x_idx)
+{
+    int end_elem = (x_idx + 1) * 16;
+    if(end_elem > WIDTH)
+    {
+        end_elem = WIDTH - x_idx * 16;
+    }
+    DATA_TYPE_OUTPUT res = prev_res[0];
+    for(int x_v = 1; x_v < end_elem; ++x_v)
+    {
+        res = select(res, prev_res[x_v], *(input + prev_res[x_v]) < * (input + res));
+    }
+    return res;
+}
+#else // !defined(PREV_OUTPUT)
+/** Find the index of the minimum value in a vector
+ *
+ * @param[in] input Pointer to the first value of the row being reduced.
+ * @param[in] x_idx Index of the 16-wide block processed by this work-item.
+ *
+ * @return index of the minimum value.
+ */
+inline DATA_TYPE_OUTPUT arg_idx_min(__global const DATA_TYPE *input, const int x_idx)
+{
+#if WIDTH < 16
+    DATA_TYPE_OUTPUT res = 0;
+    for(DATA_TYPE_OUTPUT x_v = res + 1; x_v < WIDTH; ++x_v)
+    {
+        res = select(res, x_v, *(input + x_v) < * (input + res));
+    }
+    return res;
+#else  // WIDTH >= 16
+    int       x_elem   = x_idx * 16;
+    const int x_goback = select(0, 16 - WIDTH % 16, x_elem + 16 > WIDTH);
+    x_elem -= x_goback;
+
+    VEC_DATA_TYPE(DATA_TYPE, 16)
+    in = vload16(0, input - x_goback);
+    VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
+    res = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
+
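+    // Tournament reduction: repeatedly compare the two halves of the value
+    // vector and keep the winning values together with their indices; on equal
+    // values the smaller index wins, matching a serial left-to-right scan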
+    VEC_DATA_TYPE(COND_DATA_TYPE, 8)
+    idx_sel       = (in.s01234567 <= in.s89abcdef);
+    in.s01234567  = select(in.s89abcdef, in.s01234567, idx_sel);
+    res.s01234567 = select(res.s89abcdef, res.s01234567, CONVERT(idx_sel, int8));
+
+    idx_sel.s0123 = (in.s0123 < in.s4567) || (in.s0123 == in.s4567 && CONVERT((res.s0123 < res.s4567), VEC_DATA_TYPE(COND_DATA_TYPE, 4)));
+    in.s0123      = select(in.s4567, in.s0123, idx_sel.s0123);
+    res.s0123     = select(res.s4567, res.s0123, CONVERT(idx_sel.s0123, int4));
+
+    idx_sel.s01 = (in.s01 < in.s23) || (in.s01 == in.s23 && CONVERT((res.s01 < res.s23), VEC_DATA_TYPE(COND_DATA_TYPE, 2)));
+    in.s01      = select(in.s23, in.s01, idx_sel.s01);
+    res.s01     = select(res.s23, res.s01, CONVERT(idx_sel.s01, int2));
+
+    idx_sel.s0 = (in.s0 < in.s1) || (in.s0 == in.s1 && CONVERT((res.s0 < res.s1), COND_DATA_TYPE));
+    res.s0     = select(res.s1, res.s0, CONVERT(idx_sel.s0, int));
+
+    return res.s0 + x_elem;
+#endif // WIDTH < 16
+}
+#endif // defined(PREV_OUTPUT)
+#endif // defined(ARG_MIN)
+#if defined(ARG_MAX)
+#if defined(PREV_OUTPUT)
+/** Find the index of the maximum value in a vector, resolving the partial results of previous stages
+ *
+ * @param[in] input    Pointer to the first value of the row being reduced.
+ * @param[in] prev_res Pointer to the partial result indices produced by the previous stage.
+ * @param[in] x_idx    Index of the 16-wide block processed by this work-item.
+ *
+ * @return index of the maximum value.
+ */
+inline DATA_TYPE_OUTPUT arg_idx_max_prev_out(__global const DATA_TYPE *input, __global const DATA_TYPE_OUTPUT *prev_res, const int x_idx)
+{
+    int end_elem = (x_idx + 1) * 16;
+    if(end_elem > WIDTH)
+    {
+        end_elem = WIDTH - x_idx * 16;
+    }
+    DATA_TYPE_OUTPUT res = prev_res[0];
+    for(int x_v = 1; x_v < end_elem; ++x_v)
+    {
+        res = select(res, prev_res[x_v], *(input + prev_res[x_v]) > *(input + res));
+    }
+    return res;
+}
+#else // !defined(PREV_OUTPUT)
+/** Find the index of the maximum value in a vector
+ *
+ * @param[in] input Pointer to the first value of the row being reduced.
+ * @param[in] x_idx Index of the 16-wide block processed by this work-item.
+ *
+ * @return index of the maximum value.
+ */
+inline DATA_TYPE_OUTPUT arg_idx_max(__global const DATA_TYPE *input, const int x_idx)
+{
+#if WIDTH < 16
+    DATA_TYPE_OUTPUT res = 0;
+    for(DATA_TYPE_OUTPUT x_v = res + 1; x_v < WIDTH; ++x_v)
+    {
+        res = select(res, x_v, *(input + x_v) > *(input + res));
+    }
+    return res;
+#else  // WIDTH >= 16
+    int       x_elem   = x_idx * 16;
+    const int x_goback = select(0, 16 - WIDTH % 16, x_elem + 16 > WIDTH);
+    x_elem -= x_goback;
+
+    VEC_DATA_TYPE(DATA_TYPE, 16)
+    in = vload16(0, input - x_goback);
+    VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
+    res = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
+
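+    // Same tournament scheme as the min variant: halve the candidate set at
+    // each step, preferring the smaller index when values compare equal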
+    VEC_DATA_TYPE(COND_DATA_TYPE, 8)
+    idx_sel       = (in.s01234567 >= in.s89abcdef);
+    in.s01234567  = select(in.s89abcdef, in.s01234567, idx_sel);
+    res.s01234567 = select(res.s89abcdef, res.s01234567, CONVERT(idx_sel, int8));
+
+    idx_sel.s0123 = (in.s0123 > in.s4567) || (in.s0123 == in.s4567 && CONVERT((res.s0123 < res.s4567), VEC_DATA_TYPE(COND_DATA_TYPE, 4)));
+    in.s0123      = select(in.s4567, in.s0123, idx_sel.s0123);
+    res.s0123     = select(res.s4567, res.s0123, CONVERT(idx_sel.s0123, int4));
+
+    idx_sel.s01 = (in.s01 > in.s23) || (in.s01 == in.s23 && CONVERT((res.s01 < res.s23), VEC_DATA_TYPE(COND_DATA_TYPE, 2)));
+    in.s01      = select(in.s23, in.s01, idx_sel.s01);
+    res.s01     = select(res.s23, res.s01, CONVERT(idx_sel.s01, int2));
+
+    idx_sel.s0 = (in.s0 > in.s1) || (in.s0 == in.s1 && CONVERT((res.s0 < res.s1), COND_DATA_TYPE));
+    res.s0     = select(res.s1, res.s0, CONVERT(idx_sel.s0, int));
+
+    return res.s0 + x_elem;
+#endif // WIDTH < 16
+}
+#endif // defined(PREV_OUTPUT)
+#endif // defined(ARG_MAX)
+
+/** This kernel performs a parallel reduction on the x-axis for the given operation.
+ *
+ * @note If the results of previous stages are passed in, the PREV_OUTPUT flag must be set at compile time using -DPREV_OUTPUT
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The data type of the output must be passed at compile time using -DDATA_TYPE_OUTPUT: e.g. -DDATA_TYPE_OUTPUT=uint
+ * @note The width size must be passed at compile time using -DWIDTH e.g. -DWIDTH=128
+ * @note The arg_max flag must be passed at compile time using -DARG_MAX if we want to compute the ArgMax
+ * @note The arg_min flag must be passed at compile time using -DARG_MIN if we want to compute the ArgMin
+ *
+ * @param[in]  src_ptr                                   Pointer to the source tensor. Supported data types: S32/F16/F32
+ * @param[in]  src_stride_x                              Stride of the source tensor in X dimension (in bytes)
+ * @param[in]  src_step_x                                src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  src_stride_y                              Stride of the source tensor in Y dimension (in bytes)
+ * @param[in]  src_step_y                                src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  src_offset_first_element_in_bytes         The offset of the first element in the source tensor
+ * @param[in]  prev_res_ptr                              (Optional) Pointer to the previous results tensor. Supported data types: U32/S32
+ * @param[in]  prev_res_stride_x                         (Optional) Stride of the previous results tensor in X dimension (in bytes)
+ * @param[in]  prev_res_step_x                           (Optional) prev_res_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  prev_res_stride_y                         (Optional) Stride of the previous results tensor in Y dimension (in bytes)
+ * @param[in]  prev_res_step_y                           (Optional) prev_res_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  prev_res_offset_first_element_in_bytes    (Optional) The offset of the first element in the previous results tensor
+ * @param[out] partial_res_ptr                           Pointer to the destination tensor holding the partial results. Supported data types: U32/S32
+ * @param[in]  partial_res_stride_x                      Stride of the partial results tensor in X dimension (in bytes)
+ * @param[in]  partial_res_step_x                        partial_res_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  partial_res_stride_y                      Stride of the partial results tensor in Y dimension (in bytes)
+ * @param[in]  partial_res_step_y                        partial_res_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  partial_res_offset_first_element_in_bytes The offset of the first element in the partial results tensor
+ * @param[in]  local_results                             Local buffer for storing each work-item's partial result
+ */
+__kernel void arg_min_max_x(
+    IMAGE_DECLARATION(src),
+#if defined(PREV_OUTPUT)
+    IMAGE_DECLARATION(prev_res),
+#endif // defined(PREV_OUTPUT)
+    IMAGE_DECLARATION(partial_res),
+    __local DATA_TYPE_OUTPUT *local_results)
+{
+#if defined(PREV_OUTPUT)
+    Image src      = CONVERT_TO_IMAGE_STRUCT_NO_STEP(src);
+    Image prev_res = CONVERT_TO_IMAGE_STRUCT(prev_res);
+#else  // !defined(PREV_OUTPUT)
+    Image src                      = CONVERT_TO_IMAGE_STRUCT(src);
+#endif // defined(PREV_OUTPUT)
+    Image partial_res = CONVERT_TO_IMAGE_STRUCT(partial_res);
+
+    unsigned int lsize = get_local_size(0);
+    unsigned int lid   = get_local_id(0);
+
+    const uint     x_idx                 = get_global_id(0);
+    const uint     y_idx                 = get_global_id(1);
+    const __global DATA_TYPE *src_in_row = (const __global DATA_TYPE *)(src_ptr + src_offset_first_element_in_bytes + y_idx * src_step_y);
+
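+    // Each work-item first reduces its own 16-wide block (or the matching
+    // block of indices from the previous stage); the per-block winners are
+    // then combined across the work-group in local memory below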
+    for(unsigned int y = 0; y < get_local_size(1); ++y)
+    {
+#if defined(ARG_MAX)
+#if defined(PREV_OUTPUT)
+        local_results[lid] = arg_idx_max_prev_out(src_in_row, (__global DATA_TYPE_OUTPUT *)offset(&prev_res, 0, y), x_idx);
+#else  // !defined(PREV_OUTPUT)
+        local_results[lid] = arg_idx_max((__global DATA_TYPE *)offset(&src, 0, y), x_idx);
+#endif // defined(PREV_OUTPUT)
+#else  // defined(ARG_MIN)
+#if defined(PREV_OUTPUT)
+        local_results[lid] = arg_idx_min_prev_out(src_in_row, (__global DATA_TYPE_OUTPUT *)offset(&prev_res, 0, y), x_idx);
+#else  // !defined(PREV_OUTPUT)
+        local_results[lid] = arg_idx_min((__global DATA_TYPE *)offset(&src, 0, y), x_idx);
+#endif // defined(PREV_OUTPUT)
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
+
+        barrier(CLK_LOCAL_MEM_FENCE);
+
+        // Tree reduction in local memory: at each step the first half of the
+        // active work-items folds in the other half's candidates, preferring
+        // the smaller index when the values compare equal
+        for(unsigned int i = lsize >> 1; i > 0; i >>= 1)
+        {
+            if(lid < i)
+            {
+                DATA_TYPE tmp0 = *(src_in_row + local_results[lid]);
+                DATA_TYPE tmp1 = *(src_in_row + local_results[lid + i]);
+#if defined(ARG_MAX)
+                local_results[lid] = select(
+                                         local_results[lid],
+                                         local_results[lid + i],
+                                         ((tmp0 == tmp1) && (local_results[lid + i] < local_results[lid])) || (tmp0 < tmp1));
+#else  // defined(ARG_MIN)
+                local_results[lid] = select(
+                                         local_results[lid],
+                                         local_results[lid + i],
+                                         ((tmp0 == tmp1) && (local_results[lid + i] < local_results[lid])) || (tmp0 > tmp1));
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
+            }
+            barrier(CLK_LOCAL_MEM_FENCE);
+        }
+
+        if(lid == 0)
+        {
+            ((__global DATA_TYPE_OUTPUT *)offset(&partial_res, get_group_id(0), y))[0] = local_results[0];
+        }
+    }
+}
+#endif // defined(WIDTH)
+
+#if defined(HEIGHT)
+/** This kernel performs a reduction on the y-axis.
+ *
+ * @note The input data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The data type of the output must be passed at compile time using -DDATA_TYPE_OUTPUT: e.g. -DDATA_TYPE_OUTPUT=uint
+ * @note The data type of the intermediate results must be passed at compile time using -DDATA_TYPE_PROMOTED: e.g. -DDATA_TYPE_PROMOTED=uint
+ * @note The height size must be passed at compile time using -DHEIGHT e.g. -DHEIGHT=128
+ *
+ * @param[in] src_ptr                              Pointer to the source tensor. Supported data types: S32/F16/F32
+ * @param[in] src_stride_x                         Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x                           src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y                         Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y                           src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes    The offset of the first element in the source tensor
+ * @param[out] output_ptr                          Pointer to the destination tensor holding the resulting indices. Supported data types: U32/S32
+ * @param[in] output_stride_x                      Stride of the output tensor in X dimension (in bytes)
+ * @param[in] output_step_x                        output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y                      Stride of the output tensor in Y dimension (in bytes)
+ * @param[in] output_step_y                        output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void arg_min_max_y(
+    IMAGE_DECLARATION(src),
+    IMAGE_DECLARATION(output))
+{
+    Image src    = CONVERT_TO_IMAGE_STRUCT(src);
+    Image output = CONVERT_TO_IMAGE_STRUCT(output);
+
+    VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
+    res = CONVERT(vload16(0, (__global DATA_TYPE *)offset(&src, 0, 0)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+
+    VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
+    indx = 0;
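+    // Per-lane index of the best value seen so far along the y-axis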
+    for(unsigned int y = 1; y < HEIGHT; ++y)
+    {
+        VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
+        in = CONVERT(vload16(0, (__global DATA_TYPE *)offset(&src, 0, y)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+
+        VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
+        cond_conv = CONVERT(CONDITION_TO_USE(in, res), VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16));
+        indx      = select(indx, y, cond_conv);
+        res       = select(res, in, CONDITION_TO_USE(in, res));
+    }
+
+    // Store result
+    vstore16(indx, 0, (__global DATA_TYPE_OUTPUT *)output.ptr);
+}
+#endif // defined(HEIGHT)
+
+#if defined(DEPTH)
+/** This kernel performs a reduction on the z-axis.
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The data type of the output must be passed at compile time using -DDATA_TYPE_OUTPUT: e.g. -DDATA_TYPE_OUTPUT=uint
+ * @note The data type of the intermediate results must be passed at compile time using -DDATA_TYPE_PROMOTED: e.g. -DDATA_TYPE_PROMOTED=uint
+ * @note The depth size must be passed at compile time using -DDEPTH e.g. -DDEPTH=128
+ *
+ * @param[in] input_ptr                            Pointer to the source tensor. Supported data types: S32/F16/F32
+ * @param[in] input_stride_x                       Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x                         input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y                       Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y                         input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z                       Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z                         input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes  The offset of the first element in the source tensor
+ * @param[out] output_ptr                          Pointer to the destination tensor holding the resulting indices. Supported data types: U32/S32
+ * @param[in] output_stride_x                      Stride of the output tensor in X dimension (in bytes)
+ * @param[in] output_step_x                        output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y                      Stride of the output tensor in Y dimension (in bytes)
+ * @param[in] output_step_y                        output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z                      Stride of the output tensor in Z dimension (in bytes)
+ * @param[in] output_step_z                        output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void arg_min_max_z(
+    TENSOR3D_DECLARATION(input),
+    TENSOR3D_DECLARATION(output))
+{
+    Tensor3D input  = CONVERT_TO_TENSOR3D_STRUCT(input);
+    Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
+
+    VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
+    res = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+
+    VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
+    indx = 0;
+    for(DATA_TYPE_OUTPUT z = 1; z < DEPTH; ++z)
+    {
+        VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
+        in = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, z)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+
+        VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
+        cond_conv = CONVERT(CONDITION_TO_USE(in, res), VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16));
+        indx      = select(indx, z, cond_conv);
+        res       = select(res, in, CONDITION_TO_USE(in, res));
+    }
+
+    // Store result
+    vstore16(indx, 0, (__global DATA_TYPE_OUTPUT *)output.ptr);
+}
+#endif /* defined(DEPTH) */
+
+#if defined(BATCH) && defined(DEPTH)
+/** This kernel performs a reduction on the w-axis.
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The data type of the output must be passed at compile time using -DDATA_TYPE_OUTPUT: e.g. -DDATA_TYPE_OUTPUT=uint
+ * @note The data type of the intermediate results must be passed at compile time using -DDATA_TYPE_PROMOTED: e.g. -DDATA_TYPE_PROMOTED=uint
+ * @note The batch size must be passed at compile time using -DBATCH e.g. -DBATCH=128
+ * @note The depth size must be passed at compile time using -DDEPTH e.g. -DDEPTH=128
+ *
+ * @param[in] input_ptr                            Pointer to the source tensor. Supported data types: S32/F16/F32
+ * @param[in] input_stride_x                       Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x                         input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y                       Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y                         input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z                       Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z                         input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_stride_w                       Stride of the source tensor in W dimension (in bytes)
+ * @param[in] input_step_w                         input_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes  The offset of the first element in the source tensor
+ * @param[out] output_ptr                          Pointer to the destination tensor holding the resulting indices. Supported data types: U32/S32
+ * @param[in] output_stride_x                      Stride of the output tensor in X dimension (in bytes)
+ * @param[in] output_step_x                        output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y                      Stride of the output tensor in Y dimension (in bytes)
+ * @param[in] output_step_y                        output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z                      Stride of the output tensor in Z dimension (in bytes)
+ * @param[in] output_step_z                        output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_stride_w                      Stride of the output tensor in W dimension (in bytes)
+ * @param[in] output_step_w                        output_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void arg_min_max_w(
+    TENSOR4D_DECLARATION(input),
+    TENSOR4D_DECLARATION(output))
+{
+    Tensor4D input  = CONVERT_TO_TENSOR4D_STRUCT(input, DEPTH);
+    Tensor4D output = CONVERT_TO_TENSOR4D_STRUCT(output, DEPTH);
+
+    VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
+    res = CONVERT(vload16(0, (__global DATA_TYPE *)tensor4D_offset(&input, 0, 0, 0, 0)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+
+    VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
+    indx = 0;
+    for(DATA_TYPE_OUTPUT w = 1; w < BATCH; ++w)
+    {
+        VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
+        in = CONVERT(vload16(0, (__global DATA_TYPE *)tensor4D_offset(&input, 0, 0, 0, w)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+
+        VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
+        cond_conv = CONVERT(CONDITION_TO_USE(in, res), VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16));
+        indx      = select(indx, w, cond_conv);
+        res       = select(res, in, CONDITION_TO_USE(in, res));
+    }
+
+    // Store result
+    vstore16(indx, 0, (__global DATA_TYPE_OUTPUT *)output.ptr);
+}
+#endif /* defined(BATCH) && defined(DEPTH) */
+#endif // defined(DATA_TYPE_OUTPUT)
\ No newline at end of file
diff --git a/src/core/CL/cl_kernels/helpers.h b/src/core/CL/cl_kernels/helpers.h
index eaeaa60..ec5701d 100644
--- a/src/core/CL/cl_kernels/helpers.h
+++ b/src/core/CL/cl_kernels/helpers.h
@@ -266,6 +266,19 @@
 #define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x)))
 #define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round)
 
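+// Relational helpers used by the reduction kernels: floating-point types rely
+// on the built-in relational functions, while for integer types the serial
+// (WIDTH) path yields a scalar 0/1 and the vectorized paths yield 16-lane
+// 0/-1 masks, as expected by select()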
+#if FLOAT_DATA_TYPE
+#define ISGREATER(x, y) isgreater(x, y)
+#define ISLESS(x, y) isless(x, y)
+#else // !FLOAT_DATA_TYPE
+#if defined(WIDTH)
+#define ISGREATER(x, y) ((x > y) ? 1 : 0)
+#define ISLESS(x, y) ((x < y) ? 1 : 0)
+#else // !defined(WIDTH)
+#define ISGREATER(x, y) select((int16)0, (int16)-1, x > y)
+#define ISLESS(x, y) select((int16)0, (int16)-1, x < y)
+#endif // defined(WIDTH)
+#endif // FLOAT_DATA_TYPE
+
 #define VECTOR_DECLARATION(name)     \
     __global uchar *name##_ptr,      \
     uint        name##_stride_x, \
diff --git a/src/core/CL/cl_kernels/reduction_operation.cl b/src/core/CL/cl_kernels/reduction_operation.cl
index 5a4bb9f..451b962 100644
--- a/src/core/CL/cl_kernels/reduction_operation.cl
+++ b/src/core/CL/cl_kernels/reduction_operation.cl
@@ -23,19 +23,6 @@
  */
 #include "helpers.h"
 
-#if FLOAT_DATA_TYPE
-#define ISGREATER(x, y) isgreater(x, y)
-#define ISLESS(x, y) isless(x, y)
-#else // !FLOAT_DATA_TYPE
-#if defined(WIDTH)
-#define ISGREATER(x, y) (x > y) ? 1 : 0
-#define ISLESS(x, y) (x < y) ? 1 : 0
-#else // !defined(WIDTH)
-#define ISGREATER(x, y) select((int16)0, (int16)-1, x > y)
-#define ISLESS(x, y) select((int16)0, (int16)-1, x < y)
-#endif // defined(WIDTH)
-#endif // FLOAT_DATA_TYPE
-
 /** Calculate square sum of a vector
  *
  * @param[in] input Pointer to the first pixel.
@@ -164,7 +151,7 @@
  * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
  * @note The width size must be passed at compile time using -DWIDTH e.g. -DWIDTH=128
  * @note The product flag must be passed at compile time using -DPROD if we want to compute the product, otherwise sum will be used
- * @note In case of ARG_MIN and ARG_MAX the condition data type must be passed at compile time using -DCOND_DATA_TYPE e.g. -DCOND_DATA_TYPE=short
+ * @note In case of MIN and MAX the condition data type must be passed at compile time using -DCOND_DATA_TYPE e.g. -DCOND_DATA_TYPE=short
  *
  * @param[in] src_ptr                              Pointer to the source tensor. Supported data types: S32/F16/F32 and QASYMM8 for operation MEAN
  * @param[in] src_stride_x                         Stride of the source tensor in X dimension (in bytes)
@@ -184,32 +171,19 @@
 
     DATA_TYPE_PROMOTED res = *((__global DATA_TYPE *)vector_offset(&src, 0));
 
-#if defined(ARG_MAX) || defined(ARG_MIN)
-    uint indx = 0;
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
-
     for(unsigned int x = 1; x < WIDTH; ++x)
     {
         DATA_TYPE_PROMOTED in = *((__global DATA_TYPE *)vector_offset(&src, x));
-#if defined(ARG_MAX)
-        indx = select(indx, x, ISGREATER(in, res));
-        res  = select(res, in, CONVERT(ISGREATER(in, res), COND_DATA_TYPE));
-#elif defined(ARG_MIN)
-        indx = select(indx, x, ISLESS(in, res));
-        res  = select(res, in, CONVERT(ISLESS(in, res), COND_DATA_TYPE));
-#elif defined(MIN)
+#if defined(MIN)
         res = select(res, in, CONVERT(ISLESS(in, res), COND_DATA_TYPE));
 #elif defined(MAX)
-        res = select(res, in, CONVERT(ISGREATER(in, res), COND_DATA_TYPE));
-#else  // !(defined(ARG_MAX) || defined(ARG_MIN))
+        res = select(res, in, CONVERT(ISGREATER(in, res), COND_DATA_TYPE));
+#else  // !(defined(MAX) || defined(MIN))
         res += in;
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
+#endif // defined(MAX) || defined(MIN)
     }
 
     // Store result
-#if defined(ARG_MAX) || defined(ARG_MIN)
-    *((__global uint *)output.ptr) = indx;
-#else // !(defined(ARG_MAX) || defined(ARG_MIN))
 #if defined(MEAN)
     res /= WIDTH;
 #endif // defined(MEAN)
@@ -218,7 +192,6 @@
 #else  // defined(MIN) || defined(MAX)
     *((__global uchar *)output.ptr) = convert_uchar(res);
 #endif // defined(MIN) || defined(MAX)
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
 }
 #endif // defined(WIDTH)
 
@@ -255,27 +228,15 @@
     res *= res;
 #endif // defined(SUM_SQUARE)
 
-#if defined(ARG_MAX) || defined(ARG_MIN)
-    uint16 indx = 0;
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
-
     for(unsigned int y = 1; y < HEIGHT; ++y)
     {
         VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
         in = CONVERT(vload16(0, (__global DATA_TYPE *)offset(&src, 0, y)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
-#if defined(ARG_MAX)
-        uint16 cond_conv = CONVERT(ISGREATER(in, res), uint16);
-        indx             = select(indx, y, cond_conv);
-        res              = select(res, in, ISGREATER(in, res));
-#elif defined(ARG_MIN)
-        uint16 cond_conv                         = CONVERT(ISLESS(in, res), uint16);
-        indx                                     = select(indx, y, cond_conv);
-        res                                      = select(res, in, ISLESS(in, res));
-#elif defined(MIN)
+#if defined(MIN)
         res = select(res, in, ISLESS(in, res));
 #elif defined(MAX)
-        res = select(res, in, ISGREATER(in, res));
-#else // !(defined(ARG_MAX) || defined(ARG_MIN))
+        res = select(res, in, ISGREATER(in, res));
+#else // !(defined(MAX) || defined(MIN))
 #if defined(SUM_SQUARE)
         in *= in;
 #endif // defined(SUM_SQUARE)
@@ -284,18 +245,14 @@
 #else  // !defined(PROD)
         res += in;
 #endif // defined(PROD)
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
+#endif // defined(MAX) || defined(MIN)
     }
 
     // Store result
-#if defined(ARG_MAX) || defined(ARG_MIN)
-    vstore16(indx, 0, (__global uint *)output.ptr);
-#else // !(defined(ARG_MAX) || defined(ARG_MIN))
 #if defined(MEAN)
     res /= HEIGHT;
 #endif // defined(MEAN)
     vstore16(CONVERT(res, VEC_DATA_TYPE(DATA_TYPE, 16)), 0, (__global DATA_TYPE *)output.ptr);
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
 }
 #endif // defined(HEIGHT)
 
@@ -340,10 +297,6 @@
     res *= res;
 #endif // defined(SUM_SQUARE)
 
-#if defined(ARG_MAX) || defined(ARG_MIN)
-    uint16 indx = 0;
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
-
     for(unsigned int z = 1; z < DEPTH; ++z)
     {
         VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
@@ -354,19 +307,11 @@
         in1 = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 8, 0, z)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
 #endif // defined(COMPLEX)
 
-#if defined(ARG_MAX)
-        uint16 cond_conv = CONVERT(ISGREATER(in, res), uint16);
-        indx             = select(indx, z, cond_conv);
-        res              = select(res, in, ISGREATER(in, res));
-#elif defined(ARG_MIN)
-        uint16 cond_conv = CONVERT(ISLESS(in, res), uint16);
-        indx             = select(indx, z, cond_conv);
-        res              = select(res, in, ISLESS(in, res));
-#elif defined(MIN)
+#if defined(MIN)
         res = select(res, in, ISLESS(in, res));
 #elif defined(MAX)
-        res = select(res, in, ISGREATER(in, res));
-#else // !(defined(ARG_MAX) || defined(ARG_MIN))
+        res = select(res, in, ISGREATER(in, res));
+#else // !(defined(MAX) || defined(MIN))
 #if defined(SUM_SQUARE)
         in *= in;
 #endif // defined(SUM_SQUARE)
@@ -377,14 +322,11 @@
 #if defined(COMPLEX)
         res1 += in1;
 #endif // defined(COMPLEX)
-#endif //defined(PROD)
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
+#endif // defined(PROD)
+#endif // defined(MAX) || defined(MIN)
     }
 
     // Store result
-#if defined(ARG_MAX) || defined(ARG_MIN)
-    vstore16(indx, 0, (__global uint *)output.ptr);
-#else // !(defined(ARG_MAX) || defined(ARG_MIN))
 #if defined(MEAN)
     res /= DEPTH;
 #endif // defined(MEAN)
@@ -392,7 +334,6 @@
 #if defined(COMPLEX)
     vstore16(CONVERT(res1, VEC_DATA_TYPE(DATA_TYPE, 16)), 0, (__global DATA_TYPE *)tensor3D_offset(&output, 8, 0, 0));
 #endif // defined(COMPLEX)
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
 }
 #endif /* defined(DEPTH) */
 
@@ -438,28 +379,16 @@
     res *= res;
 #endif // defined(SUM_SQUARE)
 
-#if defined(ARG_MAX) || defined(ARG_MIN)
-    uint16 indx = 0;
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
-
     for(unsigned int w = 1; w < BATCH; ++w)
     {
         VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
         in = CONVERT(vload16(0, (__global DATA_TYPE *)tensor4D_offset(&input, 0, 0, 0, w)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
 
-#if defined(ARG_MAX)
-        uint16 cond_conv = CONVERT(ISGREATER(in, res), uint16);
-        indx             = select(indx, w, cond_conv);
-        res              = select(res, in, ISGREATER(in, res));
-#elif defined(ARG_MIN)
-        uint16 cond_conv = CONVERT(ISLESS(in, res), uint16);
-        indx             = select(indx, w, cond_conv);
-        res              = select(res, in, ISLESS(in, res));
-#elif defined(MIN)
+#if defined(MIN)
         res = select(res, in, ISLESS(in, res));
 #elif defined(MAX)
-        res = select(res, in, ISGREATER(in, res));
-#else // !(defined(ARG_MAX) || defined(ARG_MIN))
+        res = select(res, in, ISGREATER(in, res));
+#else // !(defined(MAX) || defined(MIN))
 #if defined(SUM_SQUARE)
         in *= in;
 #endif // defined(SUM_SQUARE)
@@ -468,17 +397,13 @@
 #else  //!defined(PROD)
         res += in;
 #endif //defined(PROD)
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
+#endif // defined(MAX) || defined(MIN)
     }
 
     // Store result
-#if defined(ARG_MAX) || defined(ARG_MIN)
-    vstore16(indx, 0, (__global uint *)output.ptr);
-#else // !(defined(ARG_MAX) || defined(ARG_MIN))
 #if defined(MEAN)
     res /= BATCH;
 #endif // defined(MEAN)
     vstore16(CONVERT(res, VEC_DATA_TYPE(DATA_TYPE, 16)), 0, (__global DATA_TYPE *)output.ptr);
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
 }
 #endif /* defined(BATCH) && defined(DEPTH) */
diff --git a/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp b/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp
new file mode 100644
index 0000000..c8e87ba
--- /dev/null
+++ b/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLArgMinMaxLayerKernel.h"
+
+#include "arm_compute/core/AccessWindowStatic.h"
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/CLValidate.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+
+#include "support/ToolchainSupport.h"
+
+namespace arm_compute
+{
+namespace
+{
+constexpr unsigned int vector_size = 16;
+
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *prev_output, const ITensorInfo *output, unsigned int axis, ReductionOperation op)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
+    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::S32, DataType::F16, DataType::F32);
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(op != ReductionOperation::ARG_IDX_MAX && op != ReductionOperation::ARG_IDX_MIN, "Only ARG_IDX_MAX and ARG_IDX_MIN are supported");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis");
+
+    if(output->total_size() != 0)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32, DataType::S32);
+    }
+    if(prev_output != nullptr && prev_output->total_size() != 0)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(prev_output, 1, DataType::U32, DataType::S32);
+        if(output->total_size() != 0)
+        {
+            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(prev_output, output);
+        }
+    }
+
+    return Status{};
+}
+
+std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *prev_output, ITensorInfo *output, unsigned int axis, ReductionOperation op)
+{
+    ARM_COMPUTE_UNUSED(op);
+    // Output tensor auto initialization if not yet initialized
+    TensorShape output_shape{ input->tensor_shape() };
+    output_shape.set(axis, 1);
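+    // The ARG_IDX_* operations return indices, hence the S32 output data type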
+    DataType output_data_type = DataType::S32;
+    auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape).set_data_type(output_data_type).reset_padding().set_is_resizable(true));
+
+    Window win            = calculate_max_window((prev_output != nullptr) ? (*prev_output) : (*input), Steps(vector_size));
+    bool   window_changed = false;
+
+    switch(axis)
+    {
+        case 0:
+        {
+            ITensorInfo           *input_tensor_access = prev_output != nullptr ? prev_output : input;
+            AccessWindowStatic     input_access(input_tensor_access, 0, 0, static_cast<int>(input_tensor_access->dimension(0)), 1);
+            AccessWindowHorizontal output_access(output, 0, 1);
+            window_changed = update_window_and_padding(win, input_access, output_access);
+            output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
+        }
+        break;
+        case 1:
+        case 2:
+        case 3:
+        {
+            AccessWindowHorizontal input_access(input, 0, vector_size);
+            AccessWindowHorizontal output_access(output, 0, vector_size);
+            window_changed = update_window_and_padding(win, input_access, output_access);
+            output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
+        }
+        break;
+        default:
+            ARM_COMPUTE_ERROR("Not supported");
+    }
+
+    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+    return std::make_tuple(err, win);
+}
+} // namespace
+
+CLArgMinMaxLayerKernel::CLArgMinMaxLayerKernel()
+    : _input(nullptr), _prev_output(nullptr), _output(nullptr), _reduction_axis(0), _op(ReductionOperation::ARG_IDX_MAX)
+{
+}
+
+void CLArgMinMaxLayerKernel::configure(const ICLTensor *input, const ICLTensor *prev_output, ICLTensor *output, unsigned int axis, ReductionOperation op)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (prev_output != nullptr) ? prev_output->info() : nullptr, output->info(), axis, op));
+    auto win_config = validate_and_configure_window(input->info(), (prev_output != nullptr) ? prev_output->info() : nullptr, output->info(), axis, op);
+    ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
+
+    _input          = input;
+    _prev_output    = prev_output;
+    _output         = output;
+    _reduction_axis = axis;
+    _op             = op;
+
+    // Set build options
+    CLBuildOptions    build_opts;
+    const std::string data_type_promoted = get_cl_type_from_data_type(input->info()->data_type()); // arg min/max compares values without accumulating, so no type promotion is needed
+
+    build_opts.add_option_if(_prev_output != nullptr, "-DPREV_OUTPUT");
+    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+    build_opts.add_option("-DDATA_TYPE_PROMOTED=" + data_type_promoted);
+    build_opts.add_option_if(is_data_type_float(input->info()->data_type()), "-DFLOAT_DATA_TYPE");
+    build_opts.add_option_if(op == ReductionOperation::ARG_IDX_MAX, "-DARG_MAX");
+    build_opts.add_option_if(op == ReductionOperation::ARG_IDX_MIN, "-DARG_MIN");
+    build_opts.add_option("-DCOND_DATA_TYPE=" + get_cl_select_type_from_data_type(input->info()->data_type()));
+    build_opts.add_option("-DDATA_TYPE_OUTPUT=" + get_cl_type_from_data_type(output->info()->data_type()));
+
+    // Create kernel
+    cl::NDRange lws_hint = CLKernelLibrary::get().default_ndrange();
+    std::string kernel_axis_name;
+    switch(axis)
+    {
+        case 0:
+        {
+            const ICLTensor *input_for_width = prev_output != nullptr ? _prev_output : _input;
+            build_opts.add_option("-DWIDTH=" + support::cpp11::to_string(input_for_width->info()->dimension(0)));
+
+            kernel_axis_name = "x";
+            lws_hint         = create_lws_hint_parallel_implementations(input_for_width->info()->dimension(0), vector_size);
+        }
+        break;
+        case 1:
+            build_opts.add_option("-DHEIGHT=" + support::cpp11::to_string(input->info()->dimension(1)));
+            kernel_axis_name = "y";
+            break;
+        case 2:
+            build_opts.add_option("-DDEPTH=" + support::cpp11::to_string(input->info()->dimension(2)));
+            kernel_axis_name = "z";
+            break;
+        case 3:
+            build_opts.add_option("-DDEPTH=" + support::cpp11::to_string(input->info()->dimension(2)));
+            build_opts.add_option("-DBATCH=" + support::cpp11::to_string(input->info()->dimension(3)));
+            kernel_axis_name = "w";
+            break;
+        default:
+            ARM_COMPUTE_ERROR("Not supported");
+    }
+    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("arg_min_max_" + kernel_axis_name, build_opts.options()));
+
+    // Configure kernel window
+    ICLKernel::configure_internal(std::get<1>(win_config), lws_hint);
+}
+
+Status CLArgMinMaxLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *prev_output, const ITensorInfo *output, unsigned int axis, ReductionOperation op)
+{
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, prev_output, output, axis, op));
+    ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), (prev_output != nullptr) ? prev_output->clone().get() : nullptr, output->clone().get(), axis, op)));
+    return Status{};
+}
+
+void CLArgMinMaxLayerKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+
+    switch(_reduction_axis)
+    {
+        case 0:
+        {
+            // Set out window
+            Window out_window(window);
+            out_window.set(Window::DimX, Window::Dimension(0, 0, 0));
+
+            // Get first input and output slices
+            Window in_slice  = window.first_slice_window_2D();
+            Window out_slice = out_window.first_slice_window_2D();
+
+            // Pad the window width up to a multiple of the vector size so every 16-wide block is covered
+            const unsigned int border_width = ((in_slice.x().end() % vector_size) != 0) ? vector_size - in_slice.x().end() % vector_size : 0;
+            in_slice.set(Window::DimX, Window::Dimension(in_slice.x().start(), in_slice.x().end() + border_width, in_slice.x().step()));
+            const unsigned int num_tensors = _prev_output != nullptr ? 3 : 2;
+
+            // Set the local buffer that holds each work-item's candidate index
+            unsigned int local_res_size = lws_hint()[0] * _output->info()->element_size();
+            _kernel.setArg(num_arguments_per_2D_tensor() * num_tensors, local_res_size, nullptr);
+            do
+            {
+                unsigned int idx = 0;
+                add_2D_tensor_argument(idx, _input, in_slice);
+                if(_prev_output != nullptr)
+                {
+                    add_2D_tensor_argument(idx, _prev_output, in_slice);
+                }
+                add_2D_tensor_argument(idx, _output, out_slice);
+                enqueue(queue, *this, in_slice, lws_hint());
+            }
+            while(window.slide_window_slice_2D(in_slice) && window.slide_window_slice_2D(out_slice));
+        }
+        break;
+        case 1:
+        {
+            // Get first input and output slices
+            Window window_in{ window };
+            window_in.set(Window::DimY, Window::Dimension(0, _input->info()->dimension(1), _input->info()->dimension(1)));
+            Window in_slice  = window_in.first_slice_window_2D();
+            Window out_slice = window.first_slice_window_2D();
+
+            do
+            {
+                unsigned int idx = 0;
+                add_2D_tensor_argument(idx, _input, in_slice);
+                add_2D_tensor_argument(idx, _output, out_slice);
+                enqueue(queue, *this, in_slice, lws_hint());
+            }
+            while(window_in.slide_window_slice_2D(in_slice) && window.slide_window_slice_2D(out_slice));
+        }
+        break;
+        case 2:
+        {
+            // Get first input and output slices
+            Window window_in{ window };
+            window_in.set(Window::DimZ, Window::Dimension(0, _input->info()->dimension(2), _input->info()->dimension(2)));
+            Window in_slice  = window_in.first_slice_window_3D();
+            Window out_slice = window.first_slice_window_3D();
+
+            do
+            {
+                unsigned int idx = 0;
+                add_3D_tensor_argument(idx, _input, in_slice);
+                add_3D_tensor_argument(idx, _output, out_slice);
+                enqueue(queue, *this, in_slice, lws_hint());
+            }
+            while(window_in.slide_window_slice_3D(in_slice) && window.slide_window_slice_3D(out_slice));
+        }
+        break;
+        case 3:
+        {
+            // Get first input and output slices
+            Window window_in{ window };
+            window_in.set(3, Window::Dimension(0, 1, 1));
+            Window in_slice  = window_in.first_slice_window_4D();
+            Window out_slice = window.first_slice_window_4D();
+
+            do
+            {
+                unsigned int idx = 0;
+                add_4D_tensor_argument(idx, _input, in_slice);
+                add_4D_tensor_argument(idx, _output, out_slice);
+                enqueue(queue, *this, in_slice, lws_hint());
+            }
+            while(window_in.slide_window_slice_4D(in_slice) && window.slide_window_slice_4D(out_slice));
+        }
+        break;
+        default:
+            ARM_COMPUTE_ERROR("Not supported");
+    }
+}
+} // namespace arm_compute
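A hedged usage sketch for the first stage of an axis-0 reduction with the new
kernel; the multi-stage chaining that supplies prev_output is assumed to live
in the function-level CLArgMinMaxLayer, which is not part of this hunk, and
tensor allocation is elided:

    #include "arm_compute/core/CL/kernels/CLArgMinMaxLayerKernel.h"
    #include "arm_compute/core/Types.h"

    using namespace arm_compute;

    // First stage: no previous results yet, so prev_output is nullptr and the
    // kernel reduces the source rows directly into the partial-results tensor.
    void configure_first_stage(const ICLTensor *input, ICLTensor *partial_res)
    {
        CLArgMinMaxLayerKernel kernel;
        kernel.configure(input, /* prev_output */ nullptr, partial_res,
                         /* axis */ 0, ReductionOperation::ARG_IDX_MAX);
    }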
diff --git a/src/core/CL/kernels/CLReductionOperationKernel.cpp b/src/core/CL/kernels/CLReductionOperationKernel.cpp
index cbf3923..91ee83e 100644
--- a/src/core/CL/kernels/CLReductionOperationKernel.cpp
+++ b/src/core/CL/kernels/CLReductionOperationKernel.cpp
@@ -60,19 +60,12 @@
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis");
     ARM_COMPUTE_RETURN_ERROR_ON(op == ReductionOperation::MEAN_SUM && axis == 0 && width == 0 && input->data_type() != DataType::QASYMM8);
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(op == ReductionOperation::ARG_IDX_MAX || op == ReductionOperation::ARG_IDX_MIN, "Unsupported reduction operation; use CLArgMinMaxLayer instead");
 
     if(output->total_size() != 0)
     {
-        if(op == ReductionOperation::ARG_IDX_MAX || op == ReductionOperation::ARG_IDX_MIN)
-        {
-            ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QASYMM8, "Not supported operation for QASYMM8");
-            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32, DataType::S32);
-        }
-        else
-        {
-            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
-            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
-        }
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
     }
 
     return Status{};
@@ -81,9 +74,8 @@
 std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, unsigned int axis, ReductionOperation op)
 {
     // Output tensor auto initialization if not yet initialized
-    const bool        is_arg_min_max   = (op == ReductionOperation::ARG_IDX_MIN || op == ReductionOperation::ARG_IDX_MAX);
-    const TensorShape output_shape     = arm_compute::misc::shape_calculator::compute_reduced_shape(input->tensor_shape(), axis, !is_arg_min_max);
-    const DataType    output_data_type = is_arg_min_max ? DataType::S32 : input->data_type();
+    const TensorShape output_shape     = arm_compute::misc::shape_calculator::compute_reduced_shape(input->tensor_shape(), axis, true);
+    const DataType    output_data_type = input->data_type();
     auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape).set_data_type(output_data_type).reset_padding().set_is_resizable(true));
 
     const unsigned int num_elems_processed_per_iteration = (is_data_type_quantized(input->data_type()) && (axis == 0)) ? 1 : 16;
@@ -166,8 +158,6 @@
     build_opts.add_option_if(is_data_type_float(input->info()->data_type()), "-DFLOAT_DATA_TYPE");
     build_opts.add_option_if(op == ReductionOperation::SUM_SQUARE, "-DSUM_SQUARE");
     build_opts.add_option_if(op == ReductionOperation::MEAN_SUM, "-DMEAN");
-    build_opts.add_option_if(op == ReductionOperation::ARG_IDX_MAX, "-DARG_MAX");
-    build_opts.add_option_if(op == ReductionOperation::ARG_IDX_MIN, "-DARG_MIN");
     build_opts.add_option_if(op == ReductionOperation::PROD, "-DPROD");
     build_opts.add_option_if(op == ReductionOperation::MIN, "-DMIN");
     build_opts.add_option_if(op == ReductionOperation::MAX, "-DMAX");
@@ -182,8 +172,6 @@
         case ReductionOperation::MEAN_SUM:
             build_opts.add_option(("-DOPERATION=sum"));
             break;
-        case ReductionOperation::ARG_IDX_MAX:
-        case ReductionOperation::ARG_IDX_MIN:
         case ReductionOperation::MIN:
         case ReductionOperation::MAX:
             break;
@@ -214,12 +202,9 @@
                 build_opts.add_option_if(op == ReductionOperation::MEAN_SUM, "-DWIDTH=" + support::cpp11::to_string(width));
                 const unsigned int width_leftover = input->info()->dimension(0) % border_val;
                 const unsigned int border_width   = (width_leftover != 0) ? border_val - width_leftover : 0;
-                const unsigned int num_of_threads = ((input->info()->dimension(0) + border_width) / 16);
                 kernel_axis_name                  = "x";
 
-                // Set the number of WG based on the input size. If input width is < 128
-                // we can use fewer threads than 8.
-                lws_hint     = cl::NDRange(std::min(8U, num_of_threads));
+                lws_hint     = create_lws_hint_parallel_implementations(input->info()->dimension(0), border_val);
                 _border_size = BorderSize(0, border_width, 0, 0);
             }
         }
diff --git a/src/core/Utils.cpp b/src/core/Utils.cpp
index cbf6e48..fa56118 100644
--- a/src/core/Utils.cpp
+++ b/src/core/Utils.cpp
@@ -431,12 +431,11 @@
 
 bool arm_compute::needs_serialized_reduction(ReductionOperation op, DataType dt, unsigned int axis)
 {
-    const bool is_arg_min_max    = (op == ReductionOperation::ARG_IDX_MAX || op == ReductionOperation::ARG_IDX_MIN);
     const bool is_min_max        = (op == ReductionOperation::MAX || op == ReductionOperation::MIN);
     const bool is_quantized_type = is_data_type_quantized(dt);
     const bool is_first_dim      = (axis == 0);
 
-    return !is_first_dim || is_arg_min_max || is_min_max || is_quantized_type;
+    return !is_first_dim || is_min_max || is_quantized_type;
 }
 
 #ifdef ARM_COMPUTE_ASSERTS_ENABLED