COMPMID-1728 CL: Implement ArgMax/ArgMin

Change-Id: I7eae2e55cc0b0b7bbebb7617299daaca6f75f40c
Reviewed-on: https://review.mlplatform.org/292
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index a9c4074..f2b5d45 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -370,7 +370,7 @@
     { "prior_box_layer_nchw", "prior_box_layer.cl" },
     { "quantization_layer", "quantization_layer.cl" },
     { "reduction_operation_x", "reduction_operation.cl" },
-    { "reduction_operation_quantized_x", "reduction_operation.cl" },
+    { "reduction_operation_non_parallel_x", "reduction_operation.cl" },
     { "reduction_operation_y", "reduction_operation.cl" },
     { "reduction_operation_z", "reduction_operation.cl" },
     { "reduction_operation_w", "reduction_operation.cl" },
diff --git a/src/core/CL/cl_kernels/reduction_operation.cl b/src/core/CL/cl_kernels/reduction_operation.cl
index d76e12a..d1f47be 100644
--- a/src/core/CL/cl_kernels/reduction_operation.cl
+++ b/src/core/CL/cl_kernels/reduction_operation.cl
@@ -60,7 +60,7 @@
 
     return (in.s0 + in.s1);
 }
-
+#if defined(OPERATION)
 /** This kernel performs parallel reduction given an operation on x-axis.
  *
  * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
@@ -120,13 +120,16 @@
         }
     }
 }
+#endif // defined(OPERATION)
 
 #if defined(WIDTH)
-/** This kernel performs reduction on x-axis. (QASYMM8)
+/** This kernel performs reduction on x-axis. (Non parallel)
  *
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
  * @note The width size must be passed at compile time using -DWIDTH e.g. -DWIDTH=128
+ * @note In case of ARG_MIN and ARG_MAX the condition data type must be passed at compile time using -DCOND_DATA_TYPE e.g. -DCOND_DATA_TYPE=short
  *
- * @param[in] src_ptr                              Pointer to the source tensor. Supported data types: QASYMM8
+ * @param[in] src_ptr                              Pointer to the source tensor. Supported data types: F16/F32 and QASYMM8 for operation MEAN
  * @param[in] src_stride_x                         Stride of the source tensor in X dimension (in bytes)
  * @param[in] src_step_x                           src_stride_x * number of elements along X processed per workitem(in bytes)
  * @param[in] src_offset_first_element_in_bytes    The offset of the first element in the source tensor
@@ -135,33 +138,49 @@
  * @param[in] output_step_x                        output_stride_x * number of elements along X processed per workitem(in bytes)
  * @param[in] output_offset_first_element_in_bytes The offset of the first element in the source tensor
  */
-__kernel void reduction_operation_quantized_x(
+__kernel void reduction_operation_non_parallel_x(
     VECTOR_DECLARATION(src),
     VECTOR_DECLARATION(output))
 {
     Vector src    = CONVERT_TO_VECTOR_STRUCT(src);
     Vector output = CONVERT_TO_VECTOR_STRUCT(output);
 
-    uint res = 0;
+    DATA_TYPE_PROMOTED res = *((__global DATA_TYPE *)vector_offset(&src, 0));
 
-    for(unsigned int x = 0; x < WIDTH; ++x)
+#if defined(ARG_MAX) || defined(ARG_MIN)
+    uint indx = 0;
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
+
+    for(unsigned int x = 1; x < WIDTH; ++x)
     {
-        res += *((__global uchar *)vector_offset(&src, x));
+        DATA_TYPE_PROMOTED in = *((__global DATA_TYPE *)vector_offset(&src, x));
+#if defined(ARG_MAX)
+        indx = select(indx, x, isgreater(in, res));
+        res  = select(res, in, CONVERT(isgreater(in, res), COND_DATA_TYPE));
+#elif defined(ARG_MIN)
+        indx = select(indx, x, isless(in, res));
+        res  = select(res, in, CONVERT(isless(in, res), COND_DATA_TYPE));
+#else // !(defined(ARG_MAX) || defined(ARG_MIN))
+        res += in;
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
     }
 
+    // Store result
+#if defined(ARG_MAX) || defined(ARG_MIN)
+    *((__global uint *)output.ptr) = indx;
+#else // !(defined(ARG_MAX) || defined(ARG_MIN))
 #if defined(MEAN)
     res /= WIDTH;
-#endif /* defined(MEAN) */
-
-    // Store result
+#endif // defined(MEAN)
     *((__global uchar *)output.ptr) = convert_uchar(res);
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
 }
-#endif /* defined(HEIGHT) */
+#endif /* defined(WIDTH) */
 
 #if defined(HEIGHT)
 /** This kernel performs reduction on y-axis.
  *
- * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The input data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
  * @note The height size must be passed at compile time using -DHEIGHT e.g. -DHEIGHT=128
  *
  * @param[in] src_ptr                              Pointer to the source tensor. Supported data types: QASYMM8/F16/F32
@@ -185,24 +204,45 @@
     Image output = CONVERT_TO_IMAGE_STRUCT(output);
 
     VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
-    res = 0;
+    res = CONVERT(vload16(0, (__global DATA_TYPE *)offset(&src, 0, 0)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
 
-    for(unsigned int y = 0; y < HEIGHT; ++y)
+#if defined(SUM_SQUARE)
+    res *= res;
+#endif // defined(SUM_SQUARE)
+
+#if defined(ARG_MAX) || defined(ARG_MIN)
+    uint16 indx = 0;
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
+
+    for(unsigned int y = 1; y < HEIGHT; ++y)
     {
         VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
         in = CONVERT(vload16(0, (__global DATA_TYPE *)offset(&src, 0, y)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+#if defined(ARG_MAX)
+        uint16 cond_conv = CONVERT(isgreater(in, res), uint16);
+        indx             = select(indx, y, cond_conv);
+        res              = select(res, in, isgreater(in, res));
+#elif defined(ARG_MIN)
+        uint16  cond_conv           = CONVERT(isless(in, res), uint16);
+        indx                        = select(indx, y, cond_conv);
+        res                         = select(res, in, isless(in, res));
+#else // !(defined(ARG_MAX) || defined(ARG_MIN))
 #if defined(SUM_SQUARE)
         in *= in;
-#endif // SQRSUM
+#endif // defined(SUM_SQUARE)
         res += in;
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
     }
 
+    // Store result
+#if defined(ARG_MAX) || defined(ARG_MIN)
+    vstore16(indx, 0, (__global uint *)output.ptr);
+#else // !(defined(ARG_MAX) || defined(ARG_MIN))
 #if defined(MEAN)
     res /= HEIGHT;
-#endif /* defined(MEAN) */
-
-    // Store result
+#endif // defined(MEAN)
     vstore16(CONVERT(res, VEC_DATA_TYPE(DATA_TYPE, 16)), 0, (__global DATA_TYPE *)output.ptr);
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
 }
 #endif /* defined(HEIGHT) */
 
@@ -237,24 +277,46 @@
     Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
 
     VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
-    res = 0;
+    res = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
 
-    for(unsigned int z = 0; z < DEPTH; ++z)
+#if defined(SUM_SQUARE)
+    res *= res;
+#endif // defined(SUM_SQUARE)
+
+#if defined(ARG_MAX) || defined(ARG_MIN)
+    uint16 indx = 0;
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
+
+    for(unsigned int z = 1; z < DEPTH; ++z)
     {
         VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
         in = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, z)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+
+#if defined(ARG_MAX)
+        uint16 cond_conv = CONVERT(isgreater(in, res), uint16);
+        indx             = select(indx, z, cond_conv);
+        res              = select(res, in, isgreater(in, res));
+#elif defined(ARG_MIN)
+        uint16 cond_conv = CONVERT(isless(in, res), uint16);
+        indx             = select(indx, z, cond_conv);
+        res              = select(res, in, isless(in, res));
+#else // !(defined(ARG_MAX) || defined(ARG_MIN))
 #if defined(SUM_SQUARE)
         in *= in;
-#endif // SQRSUM
+#endif // defined(SUM_SQUARE)
         res += in;
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
     }
 
+    // Store result
+#if defined(ARG_MAX) || defined(ARG_MIN)
+    vstore16(indx, 0, (__global uint *)output.ptr);
+#else // !(defined(ARG_MAX) || defined(ARG_MIN))
 #if defined(MEAN)
     res /= DEPTH;
-#endif /* defined(MEAN) */
-
-    // Store result
+#endif // defined(MEAN)
     vstore16(CONVERT(res, VEC_DATA_TYPE(DATA_TYPE, 16)), 0, (__global DATA_TYPE *)output.ptr);
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
 }
 #endif /* defined(DEPTH) */
 
@@ -294,23 +356,45 @@
     Tensor4D output = CONVERT_TO_TENSOR4D_STRUCT(output, DEPTH);
 
     VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
-    res = 0;
+    res = CONVERT(vload16(0, (__global DATA_TYPE *)tensor4D_offset(&input, 0, 0, 0, 0)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
 
-    for(unsigned int w = 0; w < BATCH; ++w)
+#if defined(SUM_SQUARE)
+    res *= res;
+#endif // defined(SUM_SQUARE)
+
+#if defined(ARG_MAX) || defined(ARG_MIN)
+    uint16 indx = 0;
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
+
+    for(unsigned int w = 1; w < BATCH; ++w)
     {
         VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
         in = CONVERT(vload16(0, (__global DATA_TYPE *)tensor4D_offset(&input, 0, 0, 0, w)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+
+#if defined(ARG_MAX)
+        uint16 cond_conv = CONVERT(isgreater(in, res), uint16);
+        indx             = select(indx, w, cond_conv);
+        res              = select(res, in, isgreater(in, res));
+#elif defined(ARG_MIN)
+        uint16 cond_conv = CONVERT(isless(in, res), uint16);
+        indx             = select(indx, w, cond_conv);
+        res              = select(res, in, isless(in, res));
+#else // !(defined(ARG_MAX) || defined(ARG_MIN))
 #if defined(SUM_SQUARE)
         in *= in;
-#endif // SQRSUM
+#endif // defined(SUM_SQUARE)
         res += in;
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
     }
 
+    // Store result
+#if defined(ARG_MAX) || defined(ARG_MIN)
+    vstore16(indx, 0, (__global uint *)output.ptr);
+#else // !(defined(ARG_MAX) || defined(ARG_MIN))
 #if defined(MEAN)
     res /= BATCH;
-#endif /* defined(MEAN) */
-
-    // Store result
+#endif // defined(MEAN)
     vstore16(CONVERT(res, VEC_DATA_TYPE(DATA_TYPE, 16)), 0, (__global DATA_TYPE *)output.ptr);
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
 }
 #endif /* defined(BATCH) && defined(DEPTH) */
\ No newline at end of file
diff --git a/src/core/CL/kernels/CLReductionOperationKernel.cpp b/src/core/CL/kernels/CLReductionOperationKernel.cpp
index ef46325..f6dc4a8 100644
--- a/src/core/CL/kernels/CLReductionOperationKernel.cpp
+++ b/src/core/CL/kernels/CLReductionOperationKernel.cpp
@@ -53,19 +53,29 @@
 
     if(output->total_size() != 0)
     {
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
+        if(op == ReductionOperation::ARG_IDX_MAX || op == ReductionOperation::ARG_IDX_MIN)
+        {
+            ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QASYMM8, "Not supported operation for QASYMM8");
+            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32);
+        }
+        else
+        {
+            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+        }
     }
 
     return Status{};
 }
 
-std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, unsigned int axis)
+std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, unsigned int axis, ReductionOperation op)
 {
     // Output tensor auto initialization if not yet initialized
     TensorShape output_shape{ input->tensor_shape() };
     output_shape.set(axis, 1);
-    auto_init_if_empty(*output, output_shape, 1, input->data_type());
+    const bool is_arg_min_max   = (op == ReductionOperation::ARG_IDX_MIN || op == ReductionOperation::ARG_IDX_MAX);
+    DataType   output_data_type = is_arg_min_max ? DataType::U32 : input->data_type();
+    auto_init_if_empty(*output, output_shape, 1, output_data_type);
 
     const unsigned int num_elems_processed_per_iteration = (is_data_type_quantized(input->data_type()) && (axis == 0)) ? 1 : 16;
     Window             win                               = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
@@ -136,7 +146,7 @@
     // Set build options
     CLBuildOptions build_opts;
     std::string    data_type_promoted = get_cl_type_from_data_type(input->info()->data_type());
-    if(is_data_type_quantized(input->info()->data_type()) && axis != 0)
+    if(is_data_type_quantized(input->info()->data_type()))
     {
         data_type_promoted = "uint";
     }
@@ -144,6 +154,8 @@
     build_opts.add_option("-DDATA_TYPE_PROMOTED=" + data_type_promoted);
     build_opts.add_option_if(op == ReductionOperation::SUM_SQUARE, "-DSUM_SQUARE=");
     build_opts.add_option_if(op == ReductionOperation::MEAN_SUM, "-DMEAN");
+    build_opts.add_option_if(op == ReductionOperation::ARG_IDX_MAX, "-DARG_MAX");
+    build_opts.add_option_if(op == ReductionOperation::ARG_IDX_MIN, "-DARG_MIN");
 
     switch(op)
     {
@@ -154,6 +166,9 @@
         case ReductionOperation::MEAN_SUM:
             build_opts.add_option(("-DOPERATION=sum"));
             break;
+        case ReductionOperation::ARG_IDX_MAX:
+        case ReductionOperation::ARG_IDX_MIN:
+            break;
         default:
             ARM_COMPUTE_ERROR("Unsupported reduction operation");
     }
@@ -161,11 +176,12 @@
     // Create kernel
     cl::NDRange lws_hint = CLKernelLibrary::get().default_ndrange();
     std::string kernel_axis_name;
+    const bool  is_arg_op = (op == ReductionOperation::ARG_IDX_MAX || op == ReductionOperation::ARG_IDX_MIN);
     switch(axis)
     {
         case 0:
         {
-            if(!is_data_type_quantized(input->info()->data_type()))
+            if(!is_data_type_quantized(input->info()->data_type()) && !is_arg_op)
             {
                 build_opts.add_option_if(op == ReductionOperation::MEAN_SUM, "-DWIDTH=" + support::cpp11::to_string(width));
                 const unsigned int width_leftover = input->info()->dimension(0) % border_val;
@@ -181,7 +197,8 @@
             else
             {
                 build_opts.add_option("-DWIDTH=" + support::cpp11::to_string(input->info()->dimension(0)));
-                kernel_axis_name = "quantized_x";
+                build_opts.add_option_if_else(_input->info()->data_type() == DataType::F32, "-DCOND_DATA_TYPE=int", "-DCOND_DATA_TYPE=short");
+                kernel_axis_name = "non_parallel_x";
             }
         }
         break;
@@ -204,7 +221,7 @@
     _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("reduction_operation_" + kernel_axis_name, build_opts.options()));
 
     // Configure kernel window
-    auto win_config = validate_and_configure_window(_input->info(), _output->info(), axis);
+    auto win_config = validate_and_configure_window(_input->info(), _output->info(), axis, op);
 
     ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
 
@@ -214,7 +231,7 @@
 Status CLReductionOperationKernel::validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op, unsigned int width)
 {
     ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, axis, op, width));
-    ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), output->clone().get(), axis)));
+    ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), output->clone().get(), axis, op)));
 
     return Status{};
 }
@@ -224,12 +241,13 @@
     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
     ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
 
+    const bool is_arg_op = (_op == ReductionOperation::ARG_IDX_MAX || _op == ReductionOperation::ARG_IDX_MIN);
     switch(_reduction_axis)
     {
         case 0:
         {
             // We use parallel reduction only in non quantized types
-            if(!is_data_type_quantized(_input->info()->data_type()))
+            if(!is_data_type_quantized(_input->info()->data_type()) && !is_arg_op)
             {
                 // Set out window
                 Window out_window(window);
diff --git a/src/runtime/CL/functions/CLArgMinMaxLayer.cpp b/src/runtime/CL/functions/CLArgMinMaxLayer.cpp
new file mode 100644
index 0000000..a6393c5
--- /dev/null
+++ b/src/runtime/CL/functions/CLArgMinMaxLayer.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/runtime/CL/functions/CLArgMinMaxLayer.h"
+
+#include "arm_compute/core/CL/kernels/CLReductionOperationKernel.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+
+namespace arm_compute
+{
+void CLArgMinMaxLayer::configure(const ICLTensor *input, int axis, ICLTensor *output, const ReductionOperation &op)
+{
+    auto k = arm_compute::support::cpp14::make_unique<CLReductionOperationKernel>();
+    k->configure(input, output, axis, op);
+    _kernel = std::move(k);
+}
+
+Status CLArgMinMaxLayer::validate(const ITensorInfo *input, int axis, const ITensorInfo *output, const ReductionOperation &op)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(op != ReductionOperation::ARG_IDX_MAX && op != ReductionOperation::ARG_IDX_MIN, "Invalid operation");
+    return CLReductionOperationKernel::validate(input, output, axis, op);
+}
+} // namespace arm_compute