COMPMID-2318: Implement NEROIAlignLayer

Added support for QASYMM8. The quantized path takes QASYMM8 input/output
tensors and expects the ROIs as QASYMM16 with a scale of 0.125 and zero offset.

Change-Id: I884ee8b44f38ed6e2eb5600e4ffff25e19f52eb8
Signed-off-by: Pablo Tello <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1831
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
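
Reviewer note: a minimal usage sketch of the new quantized path follows. It is not
part of the patch; the shapes, quantization parameters and pooling settings are
illustrative assumptions derived from the documentation changes below (QASYMM8
input/output, QASYMM16 ROIs with scale 0.125 and zero offset, NCHW default layout).

```cpp
// Hypothetical usage sketch of NEROIAlignLayer with a QASYMM8 input.
// Shapes and quantization parameters are made up for illustration.
#include "arm_compute/runtime/NEON/functions/NEROIAlignLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

void roi_align_qasymm8_example()
{
    Tensor input, rois, output;

    // QASYMM8 feature map: 32x32, 16 channels, 1 batch (NCHW, the default layout).
    input.allocator()->init(TensorInfo(TensorShape(32U, 32U, 16U, 1U), 1, DataType::QASYMM8,
                                       QuantizationInfo(1.f / 255.f, 127)));

    // ROIs: [5, N] QASYMM16 tensor; the scale must be 0.125 and the offset 0.
    rois.allocator()->init(TensorInfo(TensorShape(5U, 10U), 1, DataType::QASYMM16,
                                      QuantizationInfo(0.125f, 0)));

    // Output: one 7x7x16 pooled block per ROI, same data type as the input.
    output.allocator()->init(TensorInfo(TensorShape(7U, 7U, 16U, 10U), 1, DataType::QASYMM8,
                                        QuantizationInfo(2.f / 255.f, 120)));

    NEROIAlignLayer roi_align;
    roi_align.configure(&input, &rois, &output, ROIPoolingLayerInfo(7U, 7U, 0.0625f));

    input.allocator()->allocate();
    rois.allocator()->allocate();
    output.allocator()->allocate();

    // ... fill input and rois, then:
    roi_align.run();
}
```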
diff --git a/arm_compute/core/NEON/kernels/NEROIAlignLayerKernel.h b/arm_compute/core/NEON/kernels/NEROIAlignLayerKernel.h
index 00c6f07..4fc339b 100644
--- a/arm_compute/core/NEON/kernels/NEROIAlignLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEROIAlignLayerKernel.h
@@ -55,9 +55,10 @@
 
     /** Set the input and output tensors.
      *
-     * @param[in]  input     Source tensor. Data types supported: F16/F32.
+     * @param[in]  input     Source tensor. Data types supported: QASYMM8/F16/F32.
      * @param[in]  rois      ROIs tensor, it is a 2D tensor of size [5, N] (where N is the number of ROIs) containing top left and bottom right corner
-     *                       as coordinate of an image and batch_id of ROI [ batch_id, x1, y1, x2, y2 ]. Data types supported: same as @p input
+     *                       as coordinate of an image and batch_id of ROI [ batch_id, x1, y1, x2, y2 ].
+     *                       Data types supported: QASYMM16 with scale of 0.125 and 0 offset if @p input is QASYMM8, otherwise same as @p input
      * @param[out] output    Destination tensor. Data types supported: Same as @p input.
      * @param[in]  pool_info Contains pooling operation information described in @ref ROIPoolingLayerInfo.
      *
@@ -69,8 +70,9 @@
     void configure(const ITensor *input, const ITensor *rois, ITensor *output, const ROIPoolingLayerInfo &pool_info);
     /** Static function to check if given info will lead to a valid configuration of @ref NEROIAlignLayerKernel
      *
-     * @param[in] input     Source tensor info. Data types supported: F16/F32.
-     * @param[in] rois      ROIs tensor info. Data types supported: same as @p input
+     * @param[in] input     Source tensor info. Data types supported: QASYMM8/F16/F32.
+     * @param[in] rois      ROIs tensor info. Data types supported: QASYMM16 with scale of 0.125 and 0 offset if @p input is QASYMM8,
+     *                      otherwise same as @p input
      * @param[in] output    Destination tensor info. Data types supported: Same as @p input.
      * @param[in] pool_info Contains pooling operation information described in @ref ROIPoolingLayerInfo.
      *
@@ -87,7 +89,7 @@
     void run(const Window &window, const ThreadInfo &info) override;
 
 private:
-    template <DataLayout data_layout, typename data_type>
+    template <DataLayout data_layout, typename input_data_type, typename roi_data_type = input_data_type>
     void internal_run(const Window &window, const ThreadInfo &info);
 
     const ITensor      *_input;
diff --git a/arm_compute/runtime/NEON/functions/NEROIAlignLayer.h b/arm_compute/runtime/NEON/functions/NEROIAlignLayer.h
index f28fb6b..e8171d3 100644
--- a/arm_compute/runtime/NEON/functions/NEROIAlignLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEROIAlignLayer.h
@@ -42,9 +42,10 @@
 public:
     /** Set the input and output tensors.
      *
-     * @param[in]  input     Source tensor. Data types supported: F16/F32.
+     * @param[in]  input     Source tensor. Data types supported: QASYMM8/F16/F32.
      * @param[in]  rois      ROIs tensor, it is a 2D tensor of size [5, N] (where N is the number of ROIs) containing top left and bottom right corner
-     *                       as coordinate of an image and batch_id of ROI [ batch_id, x1, y1, x2, y2 ]. Data types supported: same as @p input
+     *                       as coordinate of an image and batch_id of ROI [ batch_id, x1, y1, x2, y2 ].
+     *                       Data types supported: QASYMM16 with scale of 0.125 and 0 offset if @p input is QASYMM8, otherwise same as @p input
      * @param[out] output    Destination tensor. Data types supported: Same as @p input.
      * @param[in]  pool_info Contains pooling operation information described in @ref ROIPoolingLayerInfo.
      *
@@ -54,10 +55,11 @@
      * @note The fourth dimension of @p output tensor must be the same as the number of elements in @p rois array.
      */
     void configure(const ITensor *input, const ITensor *rois, ITensor *output, const ROIPoolingLayerInfo &pool_info);
-    /** Static function to check if given info will lead to a valid configuration of @ref NEROIAlignLayer
+    /** Static function to check if given info will lead to a valid configuration of @ref NEROIAlignLayerKernel
      *
-     * @param[in] input     Source tensor info. Data types supported: F16/F32.
-     * @param[in] rois      ROIs tensor info. Data types supported: same as @p input
+     * @param[in] input     Source tensor info. Data types supported: QASYMM8/F16/F32.
+     * @param[in] rois      ROIs tensor info. Data types supported: QASYMM16 with scale of 0.125 and 0 offset if @p input is QASYMM8,
+     *                      otherwise same as @p input
      * @param[in] output    Destination tensor info. Data types supported: Same as @p input.
      * @param[in] pool_info Contains pooling operation information described in @ref ROIPoolingLayerInfo.
      *
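
Reviewer note: the static validate() documented above can be used to reject a bad
quantized configuration up front. A minimal sketch, with made-up TensorInfo values;
for a QASYMM8 input only the (0.125, 0) ROI quantization is accepted.

```cpp
// Hedged sketch: checking a QASYMM8 configuration via the static validate().
// All shapes/quantization values are illustrative, not taken from the patch.
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/NEON/functions/NEROIAlignLayer.h"

using namespace arm_compute;

Status check_roi_align_config()
{
    const TensorInfo input_info(TensorShape(32U, 32U, 16U, 1U), 1, DataType::QASYMM8,
                                QuantizationInfo(1.f / 255.f, 127));
    const TensorInfo rois_info(TensorShape(5U, 10U), 1, DataType::QASYMM16,
                               QuantizationInfo(0.125f, 0)); // any other scale/offset is rejected
    TensorInfo output_info(TensorShape(7U, 7U, 16U, 10U), 1, DataType::QASYMM8,
                           QuantizationInfo(2.f / 255.f, 120));

    return NEROIAlignLayer::validate(&input_info, &rois_info, &output_info,
                                     ROIPoolingLayerInfo(7U, 7U, 0.0625f));
}
```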
diff --git a/src/core/NEON/kernels/NEROIAlignLayerKernel.cpp b/src/core/NEON/kernels/NEROIAlignLayerKernel.cpp
index dd21094..3b944ab 100644
--- a/src/core/NEON/kernels/NEROIAlignLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEROIAlignLayerKernel.cpp
@@ -43,10 +43,9 @@
 Status validate_arguments(const ITensorInfo *input, const ITensorInfo *rois, ITensorInfo *output, const ROIPoolingLayerInfo &pool_info)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, rois, output);
-    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, rois);
     ARM_COMPUTE_RETURN_ERROR_ON(rois->dimension(0) != 5);
     ARM_COMPUTE_RETURN_ERROR_ON(rois->num_dimensions() > 2);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F32, DataType::F16);
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(input, DataLayout::NHWC, DataLayout::NCHW);
     ARM_COMPUTE_RETURN_ERROR_ON((pool_info.pooled_width() == 0) || (pool_info.pooled_height() == 0));
     ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
@@ -57,6 +56,20 @@
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(compute_roi_align_shape(*input, *rois, pool_info), output->tensor_shape());
     }
+
+    if(input->data_type() == DataType::QASYMM8)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(rois, 1, DataType::QASYMM16);
+
+        const UniformQuantizationInfo rois_qinfo = rois->quantization_info().uniform();
+        ARM_COMPUTE_RETURN_ERROR_ON(rois_qinfo.scale != 0.125f);
+        ARM_COMPUTE_RETURN_ERROR_ON(rois_qinfo.offset != 0);
+    }
+    else
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, rois);
+    }
+
     return Status{};
 }
 
@@ -118,21 +131,22 @@
 }
 
 /** Average pooling over an aligned window */
-template <typename T, DataLayout data_layout>
-inline T roi_align_1x1(const ITensor *input, unsigned int roi_batch,
-                       float region_start_x,
-                       float bin_size_x,
-                       int   grid_size_x,
-                       float region_end_x,
-                       float region_start_y,
-                       float bin_size_y,
-                       int   grid_size_y,
-                       float region_end_y,
-                       int   pz)
+template <typename input_data_type, DataLayout data_layout>
+inline input_data_type roi_align_1x1(const ITensor *input,
+                                     unsigned int   roi_batch,
+                                     float          region_start_x,
+                                     float          bin_size_x,
+                                     int            grid_size_x,
+                                     float          region_end_x,
+                                     float          region_start_y,
+                                     float          bin_size_y,
+                                     int            grid_size_y,
+                                     float          region_end_y,
+                                     int            pz)
 {
     if((region_end_x <= region_start_x) || (region_end_y <= region_start_y))
     {
-        return T(0);
+        return input_data_type(0);
     }
     else
     {
@@ -163,18 +177,90 @@
                 const float w4 = ly * lx;
                 if(data_layout == DataLayout::NCHW)
                 {
-                    const auto data1 = *reinterpret_cast<const T *>(input->ptr_to_element(Coordinates(x_low, y_low, pz, roi_batch)));
-                    const auto data2 = *reinterpret_cast<const T *>(input->ptr_to_element(Coordinates(x_high, y_low, pz, roi_batch)));
-                    const auto data3 = *reinterpret_cast<const T *>(input->ptr_to_element(Coordinates(x_low, y_high, pz, roi_batch)));
-                    const auto data4 = *reinterpret_cast<const T *>(input->ptr_to_element(Coordinates(x_high, y_high, pz, roi_batch)));
+                    const auto data1 = *reinterpret_cast<const input_data_type *>(input->ptr_to_element(Coordinates(x_low, y_low, pz, roi_batch)));
+                    const auto data2 = *reinterpret_cast<const input_data_type *>(input->ptr_to_element(Coordinates(x_high, y_low, pz, roi_batch)));
+                    const auto data3 = *reinterpret_cast<const input_data_type *>(input->ptr_to_element(Coordinates(x_low, y_high, pz, roi_batch)));
+                    const auto data4 = *reinterpret_cast<const input_data_type *>(input->ptr_to_element(Coordinates(x_high, y_high, pz, roi_batch)));
                     avg += w1 * data1 + w2 * data2 + w3 * data3 + w4 * data4;
                 }
                 else
                 {
-                    const auto data1 = *reinterpret_cast<const T *>(input->ptr_to_element(Coordinates(pz, x_low, y_low, roi_batch)));
-                    const auto data2 = *reinterpret_cast<const T *>(input->ptr_to_element(Coordinates(pz, x_high, y_low, roi_batch)));
-                    const auto data3 = *reinterpret_cast<const T *>(input->ptr_to_element(Coordinates(pz, x_low, y_high, roi_batch)));
-                    const auto data4 = *reinterpret_cast<const T *>(input->ptr_to_element(Coordinates(pz, x_high, y_high, roi_batch)));
+                    const auto data1 = *reinterpret_cast<const input_data_type *>(input->ptr_to_element(Coordinates(pz, x_low, y_low, roi_batch)));
+                    const auto data2 = *reinterpret_cast<const input_data_type *>(input->ptr_to_element(Coordinates(pz, x_high, y_low, roi_batch)));
+                    const auto data3 = *reinterpret_cast<const input_data_type *>(input->ptr_to_element(Coordinates(pz, x_low, y_high, roi_batch)));
+                    const auto data4 = *reinterpret_cast<const input_data_type *>(input->ptr_to_element(Coordinates(pz, x_high, y_high, roi_batch)));
+                    avg += w1 * data1 + w2 * data2 + w3 * data3 + w4 * data4;
+                }
+            }
+        }
+
+        avg /= grid_size_x * grid_size_y;
+        return input_data_type(avg);
+    }
+}
+
+/** Average pooling over an aligned window, dequantizing QASYMM8 samples and re-quantizing the result with @p out_qinfo */
+template <typename input_data_type, DataLayout data_layout>
+inline input_data_type roi_align_1x1_qasymm8(const ITensor          *input,
+                                             unsigned int            roi_batch,
+                                             float                   region_start_x,
+                                             float                   bin_size_x,
+                                             int                     grid_size_x,
+                                             float                   region_end_x,
+                                             float                   region_start_y,
+                                             float                   bin_size_y,
+                                             int                     grid_size_y,
+                                             float                   region_end_y,
+                                             int                     pz,
+                                             const QuantizationInfo &out_qinfo)
+{
+    if((region_end_x <= region_start_x) || (region_end_y <= region_start_y))
+    {
+        return input_data_type(out_qinfo.uniform().offset);
+    }
+    else
+    {
+        float                         avg         = 0;
+        const UniformQuantizationInfo input_qinfo = input->info()->quantization_info().uniform();
+        // Iterate through the aligned pooling region
+        for(int iy = 0; iy < grid_size_y; ++iy)
+        {
+            for(int ix = 0; ix < grid_size_x; ++ix)
+            {
+                // Align the window in the middle of every bin
+                float y = region_start_y + (iy + 0.5) * bin_size_y / float(grid_size_y);
+                float x = region_start_x + (ix + 0.5) * bin_size_x / float(grid_size_x);
+
+                // Interpolation in the [0,0] [0,1] [1,0] [1,1] square
+                const int y_low  = y;
+                const int x_low  = x;
+                const int y_high = y_low + 1;
+                const int x_high = x_low + 1;
+
+                const float ly = y - y_low;
+                const float lx = x - x_low;
+                const float hy = 1. - ly;
+                const float hx = 1. - lx;
+
+                const float w1 = hy * hx;
+                const float w2 = hy * lx;
+                const float w3 = ly * hx;
+                const float w4 = ly * lx;
+
+                if(data_layout == DataLayout::NCHW)
+                {
+                    const float data1 = dequantize_qasymm8(*reinterpret_cast<const input_data_type *>(input->ptr_to_element(Coordinates(x_low, y_low, pz, roi_batch))), input_qinfo);
+                    const float data2 = dequantize_qasymm8(*reinterpret_cast<const input_data_type *>(input->ptr_to_element(Coordinates(x_high, y_low, pz, roi_batch))), input_qinfo);
+                    const float data3 = dequantize_qasymm8(*reinterpret_cast<const input_data_type *>(input->ptr_to_element(Coordinates(x_low, y_high, pz, roi_batch))), input_qinfo);
+                    const float data4 = dequantize_qasymm8(*reinterpret_cast<const input_data_type *>(input->ptr_to_element(Coordinates(x_high, y_high, pz, roi_batch))), input_qinfo);
+                    avg += w1 * data1 + w2 * data2 + w3 * data3 + w4 * data4;
+                }
+                else
+                {
+                    const auto data1 = dequantize_qasymm8(*reinterpret_cast<const input_data_type *>(input->ptr_to_element(Coordinates(pz, x_low, y_low, roi_batch))), input_qinfo);
+                    const auto data2 = dequantize_qasymm8(*reinterpret_cast<const input_data_type *>(input->ptr_to_element(Coordinates(pz, x_high, y_low, roi_batch))), input_qinfo);
+                    const auto data3 = dequantize_qasymm8(*reinterpret_cast<const input_data_type *>(input->ptr_to_element(Coordinates(pz, x_low, y_high, roi_batch))), input_qinfo);
+                    const auto data4 = dequantize_qasymm8(*reinterpret_cast<const input_data_type *>(input->ptr_to_element(Coordinates(pz, x_high, y_high, roi_batch))), input_qinfo);
                     avg += w1 * data1 + w2 * data2 + w3 * data3 + w4 * data4;
                 }
             }
@@ -182,7 +268,7 @@
 
         avg /= grid_size_x * grid_size_y;
 
-        return T(avg);
+        return quantize_qasymm8(avg, out_qinfo);
     }
 }
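
Reviewer note: for readers unfamiliar with the bilinear sampling above, a condensed
scalar reference of what roi_align_1x1_qasymm8 computes per sampling point follows.
The helper name and the plain-pointer interface are made up; border handling and the
NCHW/NHWC addressing of the real kernel are omitted.

```cpp
// Condensed scalar reference for one sampling point of the QASYMM8 path above:
// dequantize the four neighbours and blend them with bilinear weights. The caller
// would accumulate these samples and divide by the grid size before re-quantizing.
#include <cstdint>

float bilinear_sample_dequantized(const uint8_t *plane, int width,
                                  float x, float y, float scale, int offset)
{
    const int   x_low  = static_cast<int>(x);
    const int   y_low  = static_cast<int>(y);
    const int   x_high = x_low + 1;
    const int   y_high = y_low + 1;
    const float lx     = x - x_low;
    const float ly     = y - y_low;
    const float hx     = 1.f - lx;
    const float hy     = 1.f - ly;

    // Dequantize one neighbour: scale * (q - offset). Border handling is omitted here.
    auto dq = [&](int xi, int yi) { return scale * (static_cast<int>(plane[yi * width + xi]) - offset); };

    // The four weights (hy*hx, hy*lx, ly*hx, ly*lx) sum to 1, so the blended value
    // stays within the dequantized value range of the four neighbours.
    return hy * hx * dq(x_low, y_low) + hy * lx * dq(x_high, y_low)
           + ly * hx * dq(x_low, y_high) + ly * lx * dq(x_high, y_high);
}
```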
 
@@ -198,6 +284,11 @@
     {
         switch(_input->info()->data_type())
         {
+            case DataType::QASYMM8:
+            {
+                NEROIAlignLayerKernel::internal_run<DataLayout::NCHW, uint8_t, uint16_t>(window, info);
+                break;
+            }
             case DataType::F32:
             {
                 NEROIAlignLayerKernel::internal_run<DataLayout::NCHW, float>(window, info);
@@ -221,6 +312,11 @@
     {
         switch(_input->info()->data_type())
         {
+            case DataType::QASYMM8:
+            {
+                NEROIAlignLayerKernel::internal_run<DataLayout::NHWC, uint8_t, uint16_t>(window, info);
+                break;
+            }
             case DataType::F32:
             {
                 NEROIAlignLayerKernel::internal_run<DataLayout::NHWC, float>(window, info);
@@ -246,7 +342,7 @@
     }
 }
 
-template <DataLayout data_layout, typename data_type>
+template <DataLayout data_layout, typename input_data_type, typename roi_data_type>
 void NEROIAlignLayerKernel::internal_run(const Window &window, const ThreadInfo &info)
 {
     ARM_COMPUTE_UNUSED(info);
@@ -268,16 +364,30 @@
     const int pooled_w      = _pool_info.pooled_width();
     const int pooled_h      = _pool_info.pooled_height();
 
-    const auto *rois_ptr = reinterpret_cast<const data_type *>(_rois->buffer());
+    const DataType data_type = _input->info()->data_type();
+    const bool     is_qasymm = is_data_type_quantized_asymmetric(data_type);
 
+    const auto             *rois_ptr   = reinterpret_cast<const roi_data_type *>(_rois->buffer());
+    const QuantizationInfo &rois_qinfo = _rois->info()->quantization_info();
     for(int roi_indx = roi_list_start; roi_indx < roi_list_end; ++roi_indx)
     {
         const unsigned int roi_batch = rois_ptr[values_per_roi * roi_indx];
-        const auto         x1        = rois_ptr[values_per_roi * roi_indx + 1];
-        const auto         y1        = rois_ptr[values_per_roi * roi_indx + 2];
-        const auto         x2        = rois_ptr[values_per_roi * roi_indx + 3];
-        const auto         y2        = rois_ptr[values_per_roi * roi_indx + 4];
 
+        roi_data_type qx1 = rois_ptr[values_per_roi * roi_indx + 1];
+        roi_data_type qy1 = rois_ptr[values_per_roi * roi_indx + 2];
+        roi_data_type qx2 = rois_ptr[values_per_roi * roi_indx + 3];
+        roi_data_type qy2 = rois_ptr[values_per_roi * roi_indx + 4];
+        float         x1(qx1);
+        float         x2(qx2);
+        float         y1(qy1);
+        float         y2(qy2);
+        if(is_qasymm)
+        {
+            x1 = dequantize_qasymm16(qx1, rois_qinfo);
+            x2 = dequantize_qasymm16(qx2, rois_qinfo);
+            y1 = dequantize_qasymm16(qy1, rois_qinfo);
+            y2 = dequantize_qasymm16(qy2, rois_qinfo);
+        }
         const float roi_anchor_x = x1 * _pool_info.spatial_scale();
         const float roi_anchor_y = y1 * _pool_info.spatial_scale();
         const float roi_dims_x   = std::max((x2 - x1) * _pool_info.spatial_scale(), 1.0f);
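
Reviewer note: with the mandated ROI quantization of scale 0.125 and zero offset, the
dequantization above is just a multiplication by 1/8, giving eighth-of-a-pixel ROI
resolution before spatial_scale is applied. A worked example with made-up values:

```cpp
// Worked example of the QASYMM16 ROI dequantization in the hunk above, using the
// mandated QuantizationInfo(0.125f, 0). Coordinates and spatial_scale are made up.
#include <cstdint>
#include "arm_compute/core/QuantizationInfo.h"

using namespace arm_compute;

void roi_dequantization_example()
{
    const QuantizationInfo rois_qinfo(0.125f, 0);
    const uint16_t         qx1 = 80, qx2 = 208;                        // quantized x1 / x2 of one ROI
    const float            x1  = dequantize_qasymm16(qx1, rois_qinfo); // 80  * 0.125 = 10.0
    const float            x2  = dequantize_qasymm16(qx2, rois_qinfo); // 208 * 0.125 = 26.0
    // With spatial_scale = 0.0625f:
    //   roi_anchor_x = 10.0 * 0.0625               = 0.625
    //   roi_dims_x   = max((26.0 - 10.0) * 0.0625, 1.0f) = 1.0f
    (void)x1;
    (void)x2;
}
```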
@@ -293,29 +403,36 @@
             {
                 for(int px = 0; px < pooled_w; ++px)
                 {
-                    const float region_start_x = compute_region_coordinate(px, bin_size_x, roi_anchor_x, input_width);
-                    const float region_start_y = compute_region_coordinate(py, bin_size_y, roi_anchor_y, input_height);
-                    const float region_end_x   = compute_region_coordinate(px + 1, bin_size_x, roi_anchor_x, input_width);
-                    const float region_end_y   = compute_region_coordinate(py + 1, bin_size_y, roi_anchor_y, input_height);
-                    const int   roi_bin_grid_x = (_pool_info.sampling_ratio() > 0) ? _pool_info.sampling_ratio() : int(ceil(bin_size_x));
-                    const int   roi_bin_grid_y = (_pool_info.sampling_ratio() > 0) ? _pool_info.sampling_ratio() : int(ceil(bin_size_y));
-
-                    const float out_val = roi_align_1x1<data_type, data_layout>(_input, roi_batch, region_start_x, bin_size_x,
-                                                                                roi_bin_grid_x,
-                                                                                region_end_x,
-                                                                                region_start_y,
-                                                                                bin_size_y,
-                                                                                roi_bin_grid_y,
-                                                                                region_end_y, ch);
+                    const float     region_start_x = compute_region_coordinate(px, bin_size_x, roi_anchor_x, input_width);
+                    const float     region_start_y = compute_region_coordinate(py, bin_size_y, roi_anchor_y, input_height);
+                    const float     region_end_x   = compute_region_coordinate(px + 1, bin_size_x, roi_anchor_x, input_width);
+                    const float     region_end_y   = compute_region_coordinate(py + 1, bin_size_y, roi_anchor_y, input_height);
+                    const int       roi_bin_grid_x = (_pool_info.sampling_ratio() > 0) ? _pool_info.sampling_ratio() : int(ceil(bin_size_x));
+                    const int       roi_bin_grid_y = (_pool_info.sampling_ratio() > 0) ? _pool_info.sampling_ratio() : int(ceil(bin_size_y));
+                    input_data_type out_val(0);
+                    if(is_qasymm)
+                    {
+                        out_val = roi_align_1x1_qasymm8<input_data_type, data_layout>(
+                                      _input, roi_batch, region_start_x, bin_size_x,
+                                      roi_bin_grid_x, region_end_x, region_start_y, bin_size_y,
+                                      roi_bin_grid_y, region_end_y, ch, _output->info()->quantization_info());
+                    }
+                    else
+                    {
+                        out_val = roi_align_1x1<input_data_type, data_layout>(
+                                      _input, roi_batch, region_start_x, bin_size_x,
+                                      roi_bin_grid_x, region_end_x, region_start_y, bin_size_y,
+                                      roi_bin_grid_y, region_end_y, ch);
+                    }
 
                     if(data_layout == DataLayout::NCHW)
                     {
-                        auto out_ptr = reinterpret_cast<data_type *>(_output->ptr_to_element(Coordinates(px, py, ch, roi_indx)));
+                        auto out_ptr = reinterpret_cast<input_data_type *>(_output->ptr_to_element(Coordinates(px, py, ch, roi_indx)));
                         *out_ptr     = out_val;
                     }
                     else
                     {
-                        auto out_ptr = reinterpret_cast<data_type *>(_output->ptr_to_element(Coordinates(ch, px, py, roi_indx)));
+                        auto out_ptr = reinterpret_cast<input_data_type *>(_output->ptr_to_element(Coordinates(ch, px, py, roi_indx)));
                         *out_ptr     = out_val;
                     }
                 }
diff --git a/tests/validation/NEON/ROIAlignLayer.cpp b/tests/validation/NEON/ROIAlignLayer.cpp
index 853ef65..9433c21 100644
--- a/tests/validation/NEON/ROIAlignLayer.cpp
+++ b/tests/validation/NEON/ROIAlignLayer.cpp
@@ -52,6 +52,7 @@
 AbsoluteTolerance<float> absolute_tolerance_f16(0.001f);
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);
 } // namespace
 
 TEST_SUITE(NEON)
@@ -127,6 +128,24 @@
 
 TEST_SUITE_END() // Float
 
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+template <typename T>
+using NEROIAlignLayerQuantizedFixture = ROIAlignLayerQuantizedFixture<Tensor, Accessor, NEROIAlignLayer, T>;
+
+FIXTURE_DATA_TEST_CASE(Small, NEROIAlignLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL,
+                       combine(combine(combine(combine(datasets::SmallROIDataset(),
+                                                       framework::dataset::make("DataType", { DataType::QASYMM8 })),
+                                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+                                       framework::dataset::make("InputQuantizationInfo", { QuantizationInfo(1.f / 255.f, 127) })),
+                               framework::dataset::make("OutputQuantizationInfo", { QuantizationInfo(2.f / 255.f, 120) })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8
+TEST_SUITE_END() // Quantized
+
 TEST_SUITE_END() // RoiAlign
 TEST_SUITE_END() // NEON
 } // namespace validation
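
Reviewer note: a small arithmetic check on the 1-LSB QASYMM8 tolerance used above,
interpreted in dequantized units for the test's output quantization (our arithmetic,
not part of the patch):

```cpp
// One quantization step with OutputQuantizationInfo(2.f / 255.f, 120) corresponds to
// roughly 0.008 in dequantized units, i.e. well under 1% of the representable range.
constexpr float output_scale      = 2.f / 255.f;
constexpr float tolerance_dequant = 1.f * output_scale; // ~0.00784
```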