COMPMID-2588: Optimize the output detection kernel required by MobileNet-SSD (~27% improvement)

Change-Id: Ic6ce570af3878a0666ec680e0efabba3fcfd1222
Signed-off-by: Giuseppe Rossini <giuseppe.rossini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2160
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index d7b47ac..0a25277 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -1099,7 +1099,8 @@
           _num_classes(),
           _scales_values(),
           _use_regular_nms(),
-          _detection_per_class()
+          _detection_per_class(),
+          _dequantize_scores()
     {
     }
     /** Constructor
@@ -1110,11 +1111,12 @@
      * @param[in] iou_threshold             Threshold to be used during the intersection over union.
      * @param[in] num_classes               Number of classes.
      * @param[in] scales_values             Scales values used for decode center size boxes.
-     * @param[in] use_regular_nms           (Optional) Boolean to determinate if use regular or fast nms.
-     * @param[in] detection_per_class       (Optional) Number of detection per class. Used in the Regular Non-Max-Suppression
+     * @param[in] use_regular_nms           (Optional) Boolean to determine whether to use regular or fast NMS. Defaults to false.
+     * @param[in] detection_per_class       (Optional) Number of detection per class. Used in the Regular Non-Max-Suppression. Defaults to 100.
+     * @param[in] dequantize_scores         (Optional) If the scores need to be dequantized. Defaults to true.
      */
     DetectionPostProcessLayerInfo(unsigned int max_detections, unsigned int max_classes_per_detection, float nms_score_threshold, float iou_threshold, unsigned int num_classes,
-                                  std::array<float, 4> scales_values, bool use_regular_nms = false, unsigned int detection_per_class = 100)
+                                  std::array<float, 4> scales_values, bool use_regular_nms = false, unsigned int detection_per_class = 100, bool dequantize_scores = true)
         : _max_detections(max_detections),
           _max_classes_per_detection(max_classes_per_detection),
           _nms_score_threshold(nms_score_threshold),
@@ -1122,7 +1124,8 @@
           _num_classes(num_classes),
           _scales_values(scales_values),
           _use_regular_nms(use_regular_nms),
-          _detection_per_class(detection_per_class)
+          _detection_per_class(detection_per_class),
+          _dequantize_scores(dequantize_scores)
     {
     }
     /** Get max detections. */
@@ -1184,6 +1187,11 @@
         // Saved as [y,x,h,w]
         return _scales_values[3];
     }
+    /** Get dequantize_scores value. */
+    bool dequantize_scores() const
+    {
+        return _dequantize_scores;
+    }
 
 private:
     unsigned int _max_detections;
@@ -1194,6 +1202,7 @@
     std::array<float, 4> _scales_values;
     bool         _use_regular_nms;
     unsigned int _detection_per_class;
+    bool         _dequantize_scores;
 };
 
 /** Pooling Layer Information class */
diff --git a/arm_compute/runtime/CPP/functions/CPPDetectionPostProcessLayer.h b/arm_compute/runtime/CPP/functions/CPPDetectionPostProcessLayer.h
index 1c918d2..64568e8 100644
--- a/arm_compute/runtime/CPP/functions/CPPDetectionPostProcessLayer.h
+++ b/arm_compute/runtime/CPP/functions/CPPDetectionPostProcessLayer.h
@@ -103,6 +103,7 @@
     unsigned int       _num_boxes;
     unsigned int       _num_classes_with_background;
     unsigned int       _num_max_detected_boxes;
+    bool               _dequantize_scores;
 
     Tensor         _decoded_boxes;
     Tensor         _decoded_scores;
diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h
index 28fd7f3..95369ce 100644
--- a/arm_compute/runtime/NEON/NEFunctions.h
+++ b/arm_compute/runtime/NEON/NEFunctions.h
@@ -59,6 +59,7 @@
 #include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h"
 #include "arm_compute/runtime/NEON/functions/NEDequantizationLayer.h"
 #include "arm_compute/runtime/NEON/functions/NEDerivative.h"
+#include "arm_compute/runtime/NEON/functions/NEDetectionPostProcessLayer.h"
 #include "arm_compute/runtime/NEON/functions/NEDilate.h"
 #include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
 #include "arm_compute/runtime/NEON/functions/NEElementwiseOperations.h"
diff --git a/arm_compute/runtime/NEON/functions/NEDetectionPostProcessLayer.h b/arm_compute/runtime/NEON/functions/NEDetectionPostProcessLayer.h
new file mode 100644
index 0000000..58ba98a
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/NEDetectionPostProcessLayer.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NE_DETECTION_POSTPROCESS_H__
+#define __ARM_COMPUTE_NE_DETECTION_POSTPROCESS_H__
+
+#include "arm_compute/runtime/NEON/INESimpleFunction.h"
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CPP/functions/CPPDetectionPostProcessLayer.h"
+#include "arm_compute/runtime/IMemoryManager.h"
+#include "arm_compute/runtime/MemoryGroup.h"
+#include "arm_compute/runtime/NEON/functions/NEDequantizationLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+
+#include <map>
+
+namespace arm_compute
+{
+class ITensor;
+
+/** NE Function to generate the detection output based on center size encoded boxes, class prediction and anchors
+ *  by doing non maximum suppression.
+ *
+ * @note Intended for use with MultiBox detection method.
+ */
+class NEDetectionPostProcessLayer : public IFunction
+{
+public:
+    /** Constructor */
+    NEDetectionPostProcessLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEDetectionPostProcessLayer(const NEDetectionPostProcessLayer &) = delete;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEDetectionPostProcessLayer &operator=(const NEDetectionPostProcessLayer &) = delete;
+    /** Configure the detection output layer NE function
+     *
+     * @param[in]  input_box_encoding The bounding box input tensor. Data types supported: F32, QASYMM8.
+     * @param[in]  input_score        The class prediction input tensor. Data types supported: Same as @p input_box_encoding.
+     * @param[in]  input_anchors      The anchors input tensor. Data types supported: Same as @p input_box_encoding.
+     * @param[out] output_boxes       The boxes output tensor. Data types supported: F32.
+     * @param[out] output_classes     The classes output tensor. Data types supported: Same as @p output_boxes.
+     * @param[out] output_scores      The scores output tensor. Data types supported: Same as @p output_boxes.
+     * @param[out] num_detection      The number of output detection. Data types supported: Same as @p output_boxes.
+     * @param[in]  info               (Optional) DetectionPostProcessLayerInfo information.
+     *
+     * @note Output contains all the detections. Of those, only the ones selected by the valid region are valid.
+     */
+    void configure(const ITensor *input_box_encoding, const ITensor *input_score, const ITensor *input_anchors,
+                   ITensor *output_boxes, ITensor *output_classes, ITensor *output_scores, ITensor *num_detection, DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref NEDetectionPostProcessLayer
+     *
+     * @param[in]  input_box_encoding The bounding box input tensor info. Data types supported: F32, QASYMM8.
+     * @param[in]  input_class_score  The class prediction input tensor info. Data types supported: F32, QASYMM8.
+     * @param[in]  input_anchors      The anchors input tensor info. Data types supported: F32, QASYMM8.
+     * @param[out] output_boxes       The output tensor. Data types supported: F32.
+     * @param[out] output_classes     The output tensor. Data types supported: Same as @p output_boxes.
+     * @param[out] output_scores      The output tensor. Data types supported: Same as @p output_boxes.
+     * @param[out] num_detection      The number of output detection. Data types supported: Same as @p output_boxes.
+     * @param[in]  info               (Optional) DetectionPostProcessLayerInfo information.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input_box_encoding, const ITensorInfo *input_class_score, const ITensorInfo *input_anchors,
+                           ITensorInfo *output_boxes, ITensorInfo *output_classes, ITensorInfo *output_scores, ITensorInfo *num_detection,
+                           DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo());
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    MemoryGroup _memory_group;
+
+    NEDequantizationLayer        _dequantize;
+    CPPDetectionPostProcessLayer _detection_post_process;
+
+    Tensor _decoded_scores;
+    bool   _run_dequantize;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NE_DETECTION_POSTPROCESS_H__ */
diff --git a/src/graph/backends/NEON/NEFunctionFactory.cpp b/src/graph/backends/NEON/NEFunctionFactory.cpp
index d8b0ae9..12f44e3 100644
--- a/src/graph/backends/NEON/NEFunctionFactory.cpp
+++ b/src/graph/backends/NEON/NEFunctionFactory.cpp
@@ -210,7 +210,7 @@
         case NodeType::DetectionOutputLayer:
             return detail::create_detection_output_layer<CPPDetectionOutputLayer, NETargetInfo>(*polymorphic_downcast<DetectionOutputLayerNode *>(node));
         case NodeType::DetectionPostProcessLayer:
-            return detail::create_detection_post_process_layer<CPPDetectionPostProcessLayer, NETargetInfo>(*polymorphic_downcast<DetectionPostProcessLayerNode *>(node));
+            return detail::create_detection_post_process_layer<NEDetectionPostProcessLayer, NETargetInfo>(*polymorphic_downcast<DetectionPostProcessLayerNode *>(node));
         case NodeType::EltwiseLayer:
             return detail::create_eltwise_layer<NEEltwiseFunctions, NETargetInfo>(*polymorphic_downcast<EltwiseLayerNode *>(node));
         case NodeType::FlattenLayer:
diff --git a/src/graph/backends/NEON/NENodeValidator.cpp b/src/graph/backends/NEON/NENodeValidator.cpp
index 0b53657..f17b116 100644
--- a/src/graph/backends/NEON/NENodeValidator.cpp
+++ b/src/graph/backends/NEON/NENodeValidator.cpp
@@ -62,7 +62,7 @@
         case NodeType::DetectionOutputLayer:
             return detail::validate_detection_output_layer<CPPDetectionOutputLayer>(*polymorphic_downcast<DetectionOutputLayerNode *>(node));
         case NodeType::DetectionPostProcessLayer:
-            return detail::validate_detection_post_process_layer<CPPDetectionPostProcessLayer>(*polymorphic_downcast<DetectionPostProcessLayerNode *>(node));
+            return detail::validate_detection_post_process_layer<NEDetectionPostProcessLayer>(*polymorphic_downcast<DetectionPostProcessLayerNode *>(node));
         case NodeType::GenerateProposalsLayer:
             return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported operation : GenerateProposalsLayer");
         case NodeType::NormalizePlanarYUVLayer:
diff --git a/src/runtime/CPP/functions/CPPDetectionPostProcessLayer.cpp b/src/runtime/CPP/functions/CPPDetectionPostProcessLayer.cpp
index 0addb0e..bc88f71 100644
--- a/src/runtime/CPP/functions/CPPDetectionPostProcessLayer.cpp
+++ b/src/runtime/CPP/functions/CPPDetectionPostProcessLayer.cpp
@@ -42,7 +42,7 @@
 {
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input_box_encoding, input_class_score, input_anchors);
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input_box_encoding, 1, DataType::F32, DataType::QASYMM8);
-    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_box_encoding, input_class_score, input_anchors);
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_box_encoding, input_anchors);
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(input_box_encoding->num_dimensions() > 3, "The location input tensor shape should be [4, N, kBatchSize].");
     if(input_box_encoding->num_dimensions() > 2)
     {
@@ -183,8 +183,8 @@
 
 CPPDetectionPostProcessLayer::CPPDetectionPostProcessLayer(std::shared_ptr<IMemoryManager> memory_manager)
     : _memory_group(std::move(memory_manager)), _nms(), _input_box_encoding(nullptr), _input_scores(nullptr), _input_anchors(nullptr), _output_boxes(nullptr), _output_classes(nullptr),
-      _output_scores(nullptr), _num_detection(nullptr), _info(), _num_boxes(), _num_classes_with_background(), _num_max_detected_boxes(), _decoded_boxes(), _decoded_scores(), _selected_indices(),
-      _class_scores(), _input_scores_to_use(nullptr)
+      _output_scores(nullptr), _num_detection(nullptr), _info(), _num_boxes(), _num_classes_with_background(), _num_max_detected_boxes(), _dequantize_scores(false), _decoded_boxes(), _decoded_scores(),
+      _selected_indices(), _class_scores(), _input_scores_to_use(nullptr)
 {
 }
 
@@ -214,6 +214,7 @@
     _info                        = info;
     _num_boxes                   = input_box_encoding->info()->dimension(1);
     _num_classes_with_background = _input_scores->info()->dimension(0);
+    _dequantize_scores           = (info.dequantize_scores() && is_data_type_quantized(input_box_encoding->info()->data_type()));
 
     auto_init_if_empty(*_decoded_boxes.info(), TensorInfo(TensorShape(_kNumCoordBox, _input_box_encoding->info()->dimension(1), _kBatchSize), 1, DataType::F32));
     auto_init_if_empty(*_decoded_scores.info(), TensorInfo(TensorShape(_input_scores->info()->dimension(0), _input_scores->info()->dimension(1), _kBatchSize), 1, DataType::F32));
@@ -221,7 +222,7 @@
     const unsigned int num_classes_per_box = std::min(info.max_classes_per_detection(), info.num_classes());
     auto_init_if_empty(*_class_scores.info(), TensorInfo(info.use_regular_nms() ? TensorShape(_num_boxes) : TensorShape(_num_boxes * num_classes_per_box), 1, DataType::F32));
 
-    _input_scores_to_use = is_data_type_quantized(input_box_encoding->info()->data_type()) ? &_decoded_scores : _input_scores;
+    _input_scores_to_use = _dequantize_scores ? &_decoded_scores : _input_scores;
 
     // Manage intermediate buffers
     _memory_group.manage(&_decoded_boxes);
@@ -261,7 +262,7 @@
     DecodeCenterSizeBoxes(_input_box_encoding, _input_anchors, _info, &_decoded_boxes);
 
     // Decode scores if necessary
-    if(is_data_type_quantized(_input_box_encoding->info()->data_type()))
+    if(_dequantize_scores)
     {
         for(unsigned int idx_c = 0; idx_c < _num_classes_with_background; ++idx_c)
         {
@@ -365,7 +366,6 @@
 
         // Run Non-maxima Suppression
         _nms.run();
-
         std::vector<unsigned int> selected_indices;
         for(unsigned int i = 0; i < max_detections; ++i)
         {
@@ -384,4 +384,4 @@
                     num_output, max_detections, _output_boxes, _output_classes, _output_scores, _num_detection);
     }
 }
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NEDetectionPostProcessLayer.cpp b/src/runtime/NEON/functions/NEDetectionPostProcessLayer.cpp
new file mode 100644
index 0000000..d1d1343
--- /dev/null
+++ b/src/runtime/NEON/functions/NEDetectionPostProcessLayer.cpp
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/NEON/functions/NEDetectionPostProcessLayer.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Validate.h"
+#include "support/ToolchainSupport.h"
+
+#include <cstddef>
+#include <ios>
+#include <list>
+
+namespace arm_compute
+{
+NEDetectionPostProcessLayer::NEDetectionPostProcessLayer(std::shared_ptr<IMemoryManager> memory_manager)
+    : _memory_group(std::move(memory_manager)), _dequantize(), _detection_post_process(), _decoded_scores(), _run_dequantize(false)
+{
+}
+
+void NEDetectionPostProcessLayer::configure(const ITensor *input_box_encoding, const ITensor *input_scores, const ITensor *input_anchors,
+                                            ITensor *output_boxes, ITensor *output_classes, ITensor *output_scores, ITensor *num_detection, DetectionPostProcessLayerInfo info)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input_box_encoding, input_scores, input_anchors, output_boxes, output_classes, output_scores);
+    ARM_COMPUTE_ERROR_THROW_ON(NEDetectionPostProcessLayer::validate(input_box_encoding->info(), input_scores->info(), input_anchors->info(), output_boxes->info(), output_classes->info(),
+                                                                     output_scores->info(),
+                                                                     num_detection->info(), info));
+
+    const ITensor                *input_scores_to_use = input_scores;
+    DetectionPostProcessLayerInfo info_to_use         = info;
+    _run_dequantize                                   = is_data_type_quantized(input_box_encoding->info()->data_type());
+
+    if(_run_dequantize)
+    {
+        _memory_group.manage(&_decoded_scores);
+
+        _dequantize.configure(input_scores, &_decoded_scores);
+
+        input_scores_to_use = &_decoded_scores;
+
+        // Create a new info struct to avoid dequantizing in the CPP layer
+        std::array<float, 4> scales_values{ info.scale_value_y(), info.scale_value_x(), info.scale_value_h(), info.scale_value_w() };
+        DetectionPostProcessLayerInfo info_quantized(info.max_detections(), info.max_classes_per_detection(), info.nms_score_threshold(), info.iou_threshold(), info.num_classes(),
+                                                     scales_values, info.use_regular_nms(), info.detection_per_class(), false);
+        info_to_use = info_quantized;
+    }
+
+    _detection_post_process.configure(input_box_encoding, input_scores_to_use, input_anchors, output_boxes, output_classes, output_scores, num_detection, info_to_use);
+    _decoded_scores.allocator()->allocate();
+}
+
+Status NEDetectionPostProcessLayer::validate(const ITensorInfo *input_box_encoding, const ITensorInfo *input_scores, const ITensorInfo *input_anchors,
+                                             ITensorInfo *output_boxes, ITensorInfo *output_classes, ITensorInfo *output_scores, ITensorInfo *num_detection, DetectionPostProcessLayerInfo info)
+{
+    bool run_dequantize = is_data_type_quantized(input_box_encoding->data_type());
+    if(run_dequantize)
+    {
+        TensorInfo decoded_classes_info = input_scores->clone()->set_is_resizable(true).set_data_type(DataType::F32);
+        ARM_COMPUTE_RETURN_ON_ERROR(NEDequantizationLayer::validate(input_scores, &decoded_classes_info));
+    }
+    ARM_COMPUTE_RETURN_ON_ERROR(CPPDetectionPostProcessLayer::validate(input_box_encoding, input_scores, input_anchors, output_boxes, output_classes, output_scores, num_detection, info));
+
+    return Status{};
+}
+
+void NEDetectionPostProcessLayer::run()
+{
+    MemoryGroupResourceScope scope_mg(_memory_group);
+
+    // Decode scores if necessary
+    if(_run_dequantize)
+    {
+        _dequantize.run();
+    }
+    _detection_post_process.run();
+}
+} // namespace arm_compute
diff --git a/tests/validation/NEON/DetectionPostProcessLayer.cpp b/tests/validation/NEON/DetectionPostProcessLayer.cpp
new file mode 100644
index 0000000..f479a13
--- /dev/null
+++ b/tests/validation/NEON/DetectionPostProcessLayer.cpp
@@ -0,0 +1,390 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEDetectionPostProcessLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+template <typename U, typename T>
+inline void fill_tensor(U &&tensor, const std::vector<T> &v)
+{
+    std::memcpy(tensor.data(), v.data(), sizeof(T) * v.size());
+}
+template <typename U, typename T>
+inline void quantize_and_fill_tensor(U &&tensor, const std::vector<T> &v)
+{
+    QuantizationInfo     qi = tensor.quantization_info();
+    std::vector<uint8_t> quantized;
+    quantized.reserve(v.size());
+    for(auto elem : v)
+    {
+        quantized.emplace_back(quantize_qasymm8(elem, qi));
+    }
+    std::memcpy(tensor.data(), quantized.data(), sizeof(uint8_t) * quantized.size());
+}
+inline QuantizationInfo qinfo_scaleoffset_from_minmax(const float min, const float max)
+{
+    int           offset = 0;
+    float         scale  = 0;
+    const uint8_t qmin   = std::numeric_limits<uint8_t>::min();
+    const uint8_t qmax   = std::numeric_limits<uint8_t>::max();
+    const float   f_qmin = qmin;
+    const float   f_qmax = qmax;
+
+    // Continue only if [min,max] is a valid range and not a point
+    if(min != max)
+    {
+        scale                       = (max - min) / (f_qmax - f_qmin);
+        const float offset_from_min = f_qmin - min / scale;
+        const float offset_from_max = f_qmax - max / scale;
+
+        const float offset_from_min_error = std::abs(f_qmin) + std::abs(min / scale);
+        const float offset_from_max_error = std::abs(f_qmax) + std::abs(max / scale);
+        const float f_offset              = offset_from_min_error < offset_from_max_error ? offset_from_min : offset_from_max;
+
+        uint8_t uint8_offset = 0;
+        if(f_offset < f_qmin)
+        {
+            uint8_offset = qmin;
+        }
+        else if(f_offset > f_qmax)
+        {
+            uint8_offset = qmax;
+        }
+        else
+        {
+            uint8_offset = static_cast<uint8_t>(std::round(f_offset));
+        }
+        offset = uint8_offset;
+    }
+    return QuantizationInfo(scale, offset);
+}
+
+inline void base_test_case(DetectionPostProcessLayerInfo info, DataType data_type, const SimpleTensor<float> &expected_output_boxes,
+                           const SimpleTensor<float> &expected_output_classes, const SimpleTensor<float> &expected_output_scores, const SimpleTensor<float> &expected_num_detection,
+                           AbsoluteTolerance<float> tolerance_boxes = AbsoluteTolerance<float>(0.1f), AbsoluteTolerance<float> tolerance_others = AbsoluteTolerance<float>(0.1f))
+{
+    Tensor box_encoding     = create_tensor<Tensor>(TensorShape(4U, 6U, 1U), data_type, 1, qinfo_scaleoffset_from_minmax(-1.0f, 1.0f));
+    Tensor class_prediction = create_tensor<Tensor>(TensorShape(3U, 6U, 1U), data_type, 1, qinfo_scaleoffset_from_minmax(0.0f, 1.0f));
+    Tensor anchors          = create_tensor<Tensor>(TensorShape(4U, 6U), data_type, 1, qinfo_scaleoffset_from_minmax(0.0f, 100.5f));
+
+    box_encoding.allocator()->allocate();
+    class_prediction.allocator()->allocate();
+    anchors.allocator()->allocate();
+
+    std::vector<float> box_encoding_vector =
+    {
+        0.0f, 1.0f, 0.0f, 0.0f,
+        0.0f, -1.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 1.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f
+    };
+    std::vector<float> class_prediction_vector =
+    {
+        0.0f, 0.7f, 0.68f,
+        0.0f, 0.6f, 0.5f,
+        0.0f, 0.9f, 0.83f,
+        0.0f, 0.91f, 0.97f,
+        0.0f, 0.5f, 0.4f,
+        0.0f, 0.31f, 0.22f
+    };
+    std::vector<float> anchors_vector =
+    {
+        0.4f, 0.4f, 1.1f, 1.1f,
+        0.4f, 0.4f, 1.1f, 1.1f,
+        0.4f, 0.4f, 1.1f, 1.1f,
+        0.4f, 10.4f, 1.1f, 1.1f,
+        0.4f, 10.4f, 1.1f, 1.1f,
+        0.4f, 100.4f, 1.1f, 1.1f
+    };
+
+    // Fill the tensors with random pre-generated values
+    if(data_type == DataType::F32)
+    {
+        fill_tensor(Accessor(box_encoding), box_encoding_vector);
+        fill_tensor(Accessor(class_prediction), class_prediction_vector);
+        fill_tensor(Accessor(anchors), anchors_vector);
+    }
+    else
+    {
+        quantize_and_fill_tensor(Accessor(box_encoding), box_encoding_vector);
+        quantize_and_fill_tensor(Accessor(class_prediction), class_prediction_vector);
+        quantize_and_fill_tensor(Accessor(anchors), anchors_vector);
+    }
+
+    // Determine the output through the NEON kernel
+    Tensor                      output_boxes;
+    Tensor                      output_classes;
+    Tensor                      output_scores;
+    Tensor                      num_detection;
+    NEDetectionPostProcessLayer detection;
+    detection.configure(&box_encoding, &class_prediction, &anchors, &output_boxes, &output_classes, &output_scores, &num_detection, info);
+
+    output_boxes.allocator()->allocate();
+    output_classes.allocator()->allocate();
+    output_scores.allocator()->allocate();
+    num_detection.allocator()->allocate();
+
+    // Run the kernel
+    detection.run();
+
+    // Validate against the expected output
+    // Validate output boxes
+    validate(Accessor(output_boxes), expected_output_boxes, tolerance_boxes);
+    // Validate detection classes
+    validate(Accessor(output_classes), expected_output_classes, tolerance_others);
+    // Validate detection scores
+    validate(Accessor(output_scores), expected_output_scores, tolerance_others);
+    // Validate num detections
+    validate(Accessor(num_detection), expected_num_detection, tolerance_others);
+}
+} // namespace
+
+TEST_SUITE(NEON)
+TEST_SUITE(DetectionPostProcessLayer)
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(zip(
+        framework::dataset::make("BoxEncodingsInfo", { TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(4U, 10U, 3U), 1, DataType::F32),  // Mismatching batch_size
+                                                TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::S8), // Unsupported data type
+                                                TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32), // Wrong Detection Info
+                                                TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32), // Wrong boxes dimensions
+                                                TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::QASYMM8)}), // Wrong score dimension
+        framework::dataset::make("ClassPredsInfo",{ TensorInfo(TensorShape(3U ,10U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(3U ,10U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(3U ,10U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(3U ,10U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(3U ,10U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(3U ,10U), 1, DataType::QASYMM8)})),
+        framework::dataset::make("AnchorsInfo",{ TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::QASYMM8)})),
+        framework::dataset::make("OutputBoxInfo", { TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::S8),
+                                                TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(1U, 5U, 1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::F32)})),
+        framework::dataset::make("OuputClassesInfo",{ TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(6U, 1U), 1, DataType::F32)})),
+        framework::dataset::make("OutputScoresInfo",{ TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(6U, 1U), 1, DataType::F32)})),
+        framework::dataset::make("NumDetectionsInfo",{ TensorInfo(TensorShape(1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(1U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(1U), 1, DataType::F32)})),
+        framework::dataset::make("DetectionPostProcessLayerInfo",{ DetectionPostProcessLayerInfo(3, 1, 0.0f, 0.5f, 2, {0.1f,0.1f,0.1f,0.1f}),
+                                                DetectionPostProcessLayerInfo(3, 1, 0.0f, 0.5f, 2, {0.1f,0.1f,0.1f,0.1f}),
+                                                DetectionPostProcessLayerInfo(3, 1, 0.0f, 0.5f, 2, {0.1f,0.1f,0.1f,0.1f}),
+                                                DetectionPostProcessLayerInfo(3, 1, 0.0f, 1.5f, 2, {0.0f,0.1f,0.1f,0.1f}),
+                                                DetectionPostProcessLayerInfo(3, 1, 0.0f, 0.5f, 2, {0.1f,0.1f,0.1f,0.1f}),
+                                                DetectionPostProcessLayerInfo(3, 1, 0.0f, 0.5f, 2, {0.1f,0.1f,0.1f,0.1f})})),
+        framework::dataset::make("Expected", {true, false, false, false, false, false })),
+        box_encodings_info, classes_info, anchors_info, output_boxes_info, output_classes_info,output_scores_info, num_detection_info, detect_info, expected)
+{
+    const Status status = NEDetectionPostProcessLayer::validate(&box_encodings_info.clone()->set_is_resizable(false),
+            &classes_info.clone()->set_is_resizable(false),
+            &anchors_info.clone()->set_is_resizable(false),
+            &output_boxes_info.clone()->set_is_resizable(false),
+            &output_classes_info.clone()->set_is_resizable(false),
+            &output_scores_info.clone()->set_is_resizable(false), &num_detection_info.clone()->set_is_resizable(false), detect_info);
+    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+TEST_SUITE(F32)
+TEST_CASE(Float_general, framework::DatasetMode::ALL)
+{
+    // Default (fast) NMS configuration: up to 3 detections, 1 class per detection, 2 classes.
+    const DetectionPostProcessLayerInfo post_process_info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
+                                                                                          0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/);
+    // Reference tensors the layer is expected to produce.
+    SimpleTensor<float> ref_boxes(TensorShape(4U, 3U), DataType::F32);
+    SimpleTensor<float> ref_classes(TensorShape(3U), DataType::F32);
+    SimpleTensor<float> ref_scores(TensorShape(3U), DataType::F32);
+    SimpleTensor<float> ref_num_detection(TensorShape(1U), DataType::F32);
+    fill_tensor(ref_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
+    fill_tensor(ref_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
+    fill_tensor(ref_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
+    fill_tensor(ref_num_detection, std::vector<float> { 3.f });
+    // Execute the layer and validate outputs against the references.
+    base_test_case(post_process_info, DataType::F32, ref_boxes, ref_classes, ref_scores, ref_num_detection);
+}
+
+TEST_CASE(Float_fast, framework::DatasetMode::ALL)
+{
+    // Explicitly request fast NMS (use_regular_nms = false) with one detection per class.
+    const DetectionPostProcessLayerInfo post_process_info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
+                                                                                          0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/,
+                                                                                          false /*use_regular_nms*/, 1 /*detections_per_class*/);
+
+    // Reference tensors the layer is expected to produce; fast NMS keeps all three boxes.
+    SimpleTensor<float> ref_boxes(TensorShape(4U, 3U), DataType::F32);
+    SimpleTensor<float> ref_classes(TensorShape(3U), DataType::F32);
+    SimpleTensor<float> ref_scores(TensorShape(3U), DataType::F32);
+    SimpleTensor<float> ref_num_detection(TensorShape(1U), DataType::F32);
+    fill_tensor(ref_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
+    fill_tensor(ref_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
+    fill_tensor(ref_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
+    fill_tensor(ref_num_detection, std::vector<float> { 3.f });
+
+    // Execute the layer and validate outputs against the references.
+    base_test_case(post_process_info, DataType::F32, ref_boxes, ref_classes, ref_scores, ref_num_detection);
+}
+
+TEST_CASE(Float_regular, framework::DatasetMode::ALL)
+{
+    // Regular (per-class) NMS with one detection per class: only two detections survive.
+    const DetectionPostProcessLayerInfo post_process_info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
+                                                                                          0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/,
+                                                                                          true /*use_regular_nms*/, 1 /*detections_per_class*/);
+
+    // Reference tensors; unused third slot is zero-filled since only 2 detections are kept.
+    SimpleTensor<float> ref_boxes(TensorShape(4U, 3U), DataType::F32);
+    SimpleTensor<float> ref_classes(TensorShape(3U), DataType::F32);
+    SimpleTensor<float> ref_scores(TensorShape(3U), DataType::F32);
+    SimpleTensor<float> ref_num_detection(TensorShape(1U), DataType::F32);
+    fill_tensor(ref_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, 9.85, 0.95, 10.95, 0.0f, 0.0f, 0.0f, 0.0f });
+    fill_tensor(ref_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
+    fill_tensor(ref_scores, std::vector<float> { 0.97f, 0.91f, 0.0f });
+    fill_tensor(ref_num_detection, std::vector<float> { 2.f });
+
+    // Execute the layer and validate outputs against the references.
+    base_test_case(post_process_info, DataType::F32, ref_boxes, ref_classes, ref_scores, ref_num_detection);
+}
+TEST_SUITE_END() // F32
+
+TEST_SUITE(QASYMM8)
+TEST_CASE(Quantized_general, framework::DatasetMode::ALL)
+{
+    // Default (fast) NMS configuration, driven with QASYMM8 inputs.
+    const DetectionPostProcessLayerInfo post_process_info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
+                                                                                          0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/);
+
+    // Reference tensors (kept in F32 — detection outputs are always float).
+    SimpleTensor<float> ref_boxes(TensorShape(4U, 3U), DataType::F32);
+    SimpleTensor<float> ref_classes(TensorShape(3U), DataType::F32);
+    SimpleTensor<float> ref_scores(TensorShape(3U), DataType::F32);
+    SimpleTensor<float> ref_num_detection(TensorShape(1U), DataType::F32);
+    fill_tensor(ref_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
+    fill_tensor(ref_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
+    fill_tensor(ref_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
+    fill_tensor(ref_num_detection, std::vector<float> { 3.f });
+    // Wider tolerance absorbs quantization error in the QASYMM8 path.
+    base_test_case(post_process_info, DataType::QASYMM8, ref_boxes, ref_classes, ref_scores, ref_num_detection, AbsoluteTolerance<float>(0.3f));
+}
+
+TEST_CASE(Quantized_fast, framework::DatasetMode::ALL)
+{
+    // Explicit fast NMS (use_regular_nms = false) on QASYMM8 inputs.
+    const DetectionPostProcessLayerInfo post_process_info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
+                                                                                          0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/,
+                                                                                          false /*use_regular_nms*/, 1 /*detections_per_class*/);
+
+    // Reference tensors (kept in F32 — detection outputs are always float).
+    SimpleTensor<float> ref_boxes(TensorShape(4U, 3U), DataType::F32);
+    SimpleTensor<float> ref_classes(TensorShape(3U), DataType::F32);
+    SimpleTensor<float> ref_scores(TensorShape(3U), DataType::F32);
+    SimpleTensor<float> ref_num_detection(TensorShape(1U), DataType::F32);
+    fill_tensor(ref_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
+    fill_tensor(ref_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
+    fill_tensor(ref_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
+    fill_tensor(ref_num_detection, std::vector<float> { 3.f });
+
+    // Wider tolerance absorbs quantization error in the QASYMM8 path.
+    base_test_case(post_process_info, DataType::QASYMM8, ref_boxes, ref_classes, ref_scores, ref_num_detection, AbsoluteTolerance<float>(0.3f));
+}
+
+TEST_CASE(Quantized_regular, framework::DatasetMode::ALL)
+{
+    // Regular (per-class) NMS on QASYMM8 inputs: only two detections survive.
+    const DetectionPostProcessLayerInfo post_process_info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
+                                                                                          0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/,
+                                                                                          true /*use_regular_nms*/, 1 /*detections_per_class*/);
+    // Reference tensors; unused third slot is zero-filled since only 2 detections are kept.
+    SimpleTensor<float> ref_boxes(TensorShape(4U, 3U), DataType::F32);
+    SimpleTensor<float> ref_classes(TensorShape(3U), DataType::F32);
+    SimpleTensor<float> ref_scores(TensorShape(3U), DataType::F32);
+    SimpleTensor<float> ref_num_detection(TensorShape(1U), DataType::F32);
+    fill_tensor(ref_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, 9.85, 0.95, 10.95, 0.0f, 0.0f, 0.0f, 0.0f });
+    fill_tensor(ref_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
+    fill_tensor(ref_scores, std::vector<float> { 0.95f, 0.91f, 0.0f });
+    fill_tensor(ref_num_detection, std::vector<float> { 2.f });
+
+    // Wider tolerance absorbs quantization error in the QASYMM8 path.
+    base_test_case(post_process_info, DataType::QASYMM8, ref_boxes, ref_classes, ref_scores, ref_num_detection, AbsoluteTolerance<float>(0.3f));
+}
+
+TEST_SUITE_END() // QASYMM8
+
+TEST_SUITE_END() // DetectionPostProcessLayer
+TEST_SUITE_END() // NEON
+} // namespace validation
+} // namespace test
+} // namespace arm_compute