Added QASYMM8 datatype support to NEROIPoolingLayer with Tests

Tests added to check ROIPooling Layer against reference with both Float32 and QASYMM8 input.
Resolves : COMPMID-2319

Change-Id: I867bc4dde1e3e91f9f42f4a7ce8debfe83b8db50
Signed-off-by: Mohammed Suhail Munshi <MohammedSuhail.Munshi@arm.com>
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/c/VisualCompute/ComputeLibrary/+/296640
Tested-by: bsgcomp <bsgcomp@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
Comments-Addressed: Pablo Tello <pablo.tello@arm.com>
Signed-off-by: Suhail Munshi <MohammedSuhail.Munshi@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5060
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Marquez Tello <pablo.tello@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/core/NEON/kernels/NEROIPoolingLayerKernel.cpp b/src/core/NEON/kernels/NEROIPoolingLayerKernel.cpp
index 9a3a757..400e829 100644
--- a/src/core/NEON/kernels/NEROIPoolingLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEROIPoolingLayerKernel.cpp
@@ -22,7 +22,6 @@
  * SOFTWARE.
  */
 #include "src/core/NEON/kernels/NEROIPoolingLayerKernel.h"
-
 #include "arm_compute/core/TensorInfo.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/Window.h"
@@ -35,35 +34,101 @@
 
 namespace arm_compute
 {
+namespace
+{
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *rois, const ITensorInfo *output, const ROIPoolingLayerInfo &pool_info)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output, rois);
+
+    //Validate arguments
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(rois, DataType::U16);
+    ARM_COMPUTE_RETURN_ERROR_ON(rois->dimension(0) != 5);
+    ARM_COMPUTE_RETURN_ERROR_ON(rois->num_dimensions() > 2);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F32, DataType::QASYMM8);
+    ARM_COMPUTE_RETURN_ERROR_ON((pool_info.pooled_width() == 0) || (pool_info.pooled_height() == 0));
+
+    if(output->total_size() != 0)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+        ARM_COMPUTE_RETURN_ERROR_ON((output->dimension(0) != pool_info.pooled_width()) || (output->dimension(1) != pool_info.pooled_height()));
+        ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) != output->dimension(2));
+        ARM_COMPUTE_RETURN_ERROR_ON(rois->dimension(1) != output->dimension(3));
+    }
+
+    return Status{};
+}
+
+/** Evaluate number needing to be stored in output tensor as quantized format.
+ *
+ * @param[in]  input          Source tensor. Data types supported: QASYMM8/F32
+ * @param[out] output         Destination tensor. Where output value will be stored, same datatype as input
+ * @param[in]  region_start_x Beginning region of x coordinate of pooling region
+ * @param[in]  region_start_y Beginning region of y coordinate of pooling region
+ * @param[in]  region_end_x   End of pooling region, x coordinate
+ * @param[in]  region_end_y   End of pooling region, y coordinate
+ * @param[in]  fm             Channel index of coordinate in output Tensor to store value
+ * @param[in]  px             Width index of coordinate in output Tensor to store value
+ * @param[in]  py             Height index of coordinate in output Tensor to store value
+ * @param[in]  roi_batch      Index of image to perform Pooling on in input Tensor
+ * @param[in]  roi_indx       Index of image of coordinate in output Tensor to store value
+ */
+template <typename T>
+void template_eval(const ITensor *input, const ITensor *output, int region_start_x, int region_start_y,
+                   int region_end_x, int region_end_y, int fm, int px, int py, int roi_batch, int roi_indx)
+{
+    if((region_end_x <= region_start_x) || (region_end_y <= region_start_y))
+    {
+        *reinterpret_cast<T *>(output->ptr_to_element(Coordinates(px, py, fm, roi_indx))) = 0;
+    }
+    else
+    {
+        T curr_max = std::numeric_limits<T>::lowest(); // Min value of typename T
+        for(int j = region_start_y; j < region_end_y; ++j)
+        {
+            for(int i = region_start_x; i < region_end_x; ++i)
+            {
+                const auto val = *reinterpret_cast<const T *>(input->ptr_to_element(Coordinates(i, j, fm, roi_batch)));
+                curr_max       = std::max(val, curr_max);
+            }
+        }
+
+        // if quantized datatype, requantize then store in output tensor
+        if(is_data_type_quantized(input->info()->data_type()))
+        {
+            // convert the QASYMM8 value to the output tensor's quantization scale and offset
+            UniformQuantizationInfo uqinfo = compute_requantization_scale_offset(input->info()->quantization_info().uniform(), output->info()->quantization_info().uniform());
+            *reinterpret_cast<T *>(output->ptr_to_element(Coordinates(px, py, fm, roi_indx))) = quantize_qasymm8(curr_max, uqinfo);
+        }
+        else
+        {
+            *reinterpret_cast<T *>(output->ptr_to_element(Coordinates(px, py, fm, roi_indx))) = curr_max;
+        }
+    }
+}
+} // namespace
+
 NEROIPoolingLayerKernel::NEROIPoolingLayerKernel()
     : _input(nullptr), _rois(nullptr), _output(nullptr), _pool_info(0, 0, 0.f)
 {
 }
 
-void NEROIPoolingLayerKernel::configure(const ITensor *input, const ITensor *rois, ITensor *output, const ROIPoolingLayerInfo &pool_info)
+Status NEROIPoolingLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *rois, const ITensorInfo *output, const ROIPoolingLayerInfo &pool_info)
+{
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, rois, output, pool_info));
+    return Status{};
+}
+
+void NEROIPoolingLayerKernel::configure(const ITensor *input, const ITensor *rois, const ITensor *output, const ROIPoolingLayerInfo &pool_info)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, rois);
 
     //Validate arguments
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input->info(), rois->info(), output->info());
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(rois, 1, DataType::U16);
-    ARM_COMPUTE_ERROR_ON(rois->info()->dimension(0) != 5);
-    ARM_COMPUTE_ERROR_ON(rois->info()->num_dimensions() > 2);
-    ARM_COMPUTE_ERROR_ON_CPU_F16_UNSUPPORTED(input);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
-    ARM_COMPUTE_ERROR_ON((pool_info.pooled_width() == 0) || (pool_info.pooled_height() == 0));
-
-    if(output->info()->total_size() != 0)
-    {
-        ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
-        ARM_COMPUTE_ERROR_ON((output->info()->dimension(0) != pool_info.pooled_width()) || (output->info()->dimension(1) != pool_info.pooled_height()));
-        ARM_COMPUTE_ERROR_ON(input->info()->dimension(2) != output->info()->dimension(2));
-        ARM_COMPUTE_ERROR_ON(rois->info()->dimension(1) != output->info()->dimension(3));
-    }
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), rois->info(), output->info(), pool_info));
 
     // Output auto initialization if not yet initialized
     TensorShape output_shape(pool_info.pooled_width(), pool_info.pooled_height(), input->info()->dimension(2), rois->info()->dimension(1));
-    auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type());
+
+    auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type(), output->info()->quantization_info());
 
     ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
     ARM_COMPUTE_ERROR_ON((output->info()->dimension(0) != pool_info.pooled_width()) || (output->info()->dimension(1) != pool_info.pooled_height()));
@@ -99,7 +164,8 @@
     const int   pooled_h       = _pool_info.pooled_height();
     const float spatial_scale  = _pool_info.spatial_scale();
 
-    const auto *rois_ptr = reinterpret_cast<const uint16_t *>(_rois->buffer());
+    const auto *rois_ptr  = reinterpret_cast<const uint16_t *>(_rois->buffer());
+    const auto  data_type = _input->info()->data_type();
 
     for(int roi_indx = roi_list_start; roi_indx < roi_list_end; ++roi_indx)
     {
@@ -133,23 +199,17 @@
                     region_start_y = std::min(std::max(region_start_y + roi_anchor_y, 0), height);
                     region_end_y   = std::min(std::max(region_end_y + roi_anchor_y, 0), height);
 
-                    // Iterate through the pooling region
-                    if((region_end_x <= region_start_x) || (region_end_y <= region_start_y))
+                    switch(data_type)
                     {
-                        *reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(px, py, fm, roi_indx))) = 0;
-                    }
-                    else
-                    {
-                        float curr_max = -FLT_MAX;
-                        for(int j = region_start_y; j < region_end_y; ++j)
-                        {
-                            for(int i = region_start_x; i < region_end_x; ++i)
-                            {
-                                const auto val = *reinterpret_cast<const float *>(_input->ptr_to_element(Coordinates(i, j, fm, roi_batch)));
-                                curr_max       = std::max(val, curr_max);
-                            }
-                        }
-                        *reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(px, py, fm, roi_indx))) = curr_max;
+                        case DataType::F32:
+                            template_eval<float>(_input, _output, region_start_x, region_start_y, region_end_x, region_end_y, fm, px, py, roi_batch, roi_indx);
+                            break;
+                        case DataType::QASYMM8:
+                            template_eval<qasymm8_t>(_input, _output, region_start_x, region_start_y, region_end_x, region_end_y, fm, px, py, roi_batch, roi_indx);
+                            break;
+                        default:
+                            ARM_COMPUTE_ERROR("DataType not Supported");
+                            break;
                     }
                 }
             }
diff --git a/src/core/NEON/kernels/NEROIPoolingLayerKernel.h b/src/core/NEON/kernels/NEROIPoolingLayerKernel.h
index 3642417..2fcdb81 100644
--- a/src/core/NEON/kernels/NEROIPoolingLayerKernel.h
+++ b/src/core/NEON/kernels/NEROIPoolingLayerKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -55,7 +55,7 @@
 
     /** Set the input and output tensors.
      *
-     * @param[in]  input     Source tensor. Data types supported: F32.
+     * @param[in]  input     Source tensor. Data types supported: QASYMM8/F32
      * @param[in]  rois      ROIs tensor, it is a 2D tensor of size [5, N] (where N is the number of ROIs) containing top left and bottom right corner
      *                       as coordinate of an image and batch_id of ROI [ batch_id, x1, y1, x2, y2 ]. Data types supported: U16
      * @param[out] output    Destination tensor. Data types supported: Same as @p input.
@@ -66,15 +66,31 @@
      * @note The z dimensions of @p output tensor and @p input tensor must be the same.
      * @note The fourth dimension of @p output tensor must be the same as the number of elements in @p rois tensor.
      */
-    void configure(const ITensor *input, const ITensor *rois, ITensor *output, const ROIPoolingLayerInfo &pool_info);
+    void configure(const ITensor *input, const ITensor *rois, const ITensor *output, const ROIPoolingLayerInfo &pool_info);
 
     // Inherited methods overridden:
     void run(const Window &window, const ThreadInfo &info) override;
 
+    /** Static function to check if given info will lead to a valid configuration of @ref NEROIPoolingLayerKernel
+     *
+     * @param[in] input     Source tensor info. Data types supported: QASYMM8/F32.
+     * @param[in] rois      ROIs tensor info. Data types supported: U16
+     * @param[in] output    Destination tensor info. Data types supported: Same as @p input.
+     * @param[in] pool_info Contains pooling operation information described in @ref ROIPoolingLayerInfo.
+     *
+     * @note The x and y dimensions of @p output tensor must be the same as @p pool_info 's pooled
+     * width and pooled height.
+     * @note The datatype of @p output should be the same as the datatype of @p input
+     * @note The fourth dimension of @p output tensor must be the same as the number of elements in @p rois array.
+     *
+     * @return a Status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *rois, const ITensorInfo *output, const ROIPoolingLayerInfo &pool_info);
+
 private:
     const ITensor      *_input;
     const ITensor      *_rois;
-    ITensor            *_output;
+    const ITensor            *_output;
     ROIPoolingLayerInfo _pool_info;
 };
 } // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NEROIPoolingLayer.cpp b/src/runtime/NEON/functions/NEROIPoolingLayer.cpp
index 7ca6ecc..f943405 100644
--- a/src/runtime/NEON/functions/NEROIPoolingLayer.cpp
+++ b/src/runtime/NEON/functions/NEROIPoolingLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -22,7 +22,6 @@
  * SOFTWARE.
  */
 #include "arm_compute/runtime/NEON/functions/NEROIPoolingLayer.h"
-
 #include "arm_compute/core/Helpers.h"
 #include "arm_compute/runtime/NEON/NEScheduler.h"
 #include "src/core/NEON/kernels/NEROIPoolingLayerKernel.h"
@@ -36,7 +35,12 @@
 {
 }
 
-void NEROIPoolingLayer::configure(const ITensor *input, const ITensor *rois, ITensor *output, const ROIPoolingLayerInfo &pool_info)
+Status NEROIPoolingLayer::validate(const ITensorInfo *input, const ITensorInfo *rois, const ITensorInfo *output, const ROIPoolingLayerInfo &pool_info)
+{
+    return NEROIPoolingLayerKernel::validate(input, rois, output, pool_info);
+}
+
+void NEROIPoolingLayer::configure(const ITensor *input, const ITensor *rois, const ITensor *output, const ROIPoolingLayerInfo &pool_info)
 {
     _roi_kernel = std::make_unique<NEROIPoolingLayerKernel>();
     _roi_kernel->configure(input, rois, output, pool_info);