COMPMID-3273: Add support for QASYMM8_SIGNED in CPPDetectionPostProcessLayer

Signed-off-by: Sheri Zhang <sheri.zhang@arm.com>
Change-Id: I8dad529892caf7389efb311e810c8a80ca3d03c2
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2888
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
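
Note: the patch below generalises the test helpers so that the (scale, offset) quantization parameters and the reference-value quantization work for both uint8_t (QASYMM8) and int8_t (QASYMM8_SIGNED) elements. The standalone sketch below is illustrative only and is not part of the patch; offset_from_minmax and the main() driver are made-up names used to show how an asymmetric offset can be derived from a float [min, max] range for either element type, mirroring the idea behind the templated calc_qinfo helper:

    // Standalone sketch (assumption: illustrative code, not library code).
    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <limits>

    template <typename T>
    int offset_from_minmax(float min, float max, float &scale)
    {
        const float f_qmin = std::numeric_limits<T>::min();
        const float f_qmax = std::numeric_limits<T>::max();

        scale = (max - min) / (f_qmax - f_qmin);

        // Candidate offsets mapping min -> qmin and max -> qmax; keep the one
        // with the smaller magnitude estimate, then clamp to the representable range.
        const float offset_from_min = f_qmin - min / scale;
        const float offset_from_max = f_qmax - max / scale;
        const float err_min         = std::abs(f_qmin) + std::abs(min / scale);
        const float err_max         = std::abs(f_qmax) + std::abs(max / scale);
        const float f_offset        = err_min < err_max ? offset_from_min : offset_from_max;

        return static_cast<int>(std::lround(std::max(f_qmin, std::min(f_qmax, f_offset))));
    }

    int main()
    {
        float scale = 0.f;
        // Same [min, max] range, different target types: same scale, shifted offset.
        const int off_u8 = offset_from_minmax<uint8_t>(-1.0f, 1.0f, scale); // ~128
        std::printf("QASYMM8        scale=%f offset=%d\n", scale, off_u8);
        const int off_s8 = offset_from_minmax<int8_t>(-1.0f, 1.0f, scale);  // ~-1 or 0
        std::printf("QASYMM8_SIGNED scale=%f offset=%d\n", scale, off_s8);
        return 0;
    }
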
diff --git a/tests/validation/CPP/DetectionPostProcessLayer.cpp b/tests/validation/CPP/DetectionPostProcessLayer.cpp
index f4528fb..934ffea 100644
--- a/tests/validation/CPP/DetectionPostProcessLayer.cpp
+++ b/tests/validation/CPP/DetectionPostProcessLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -46,52 +46,64 @@
 {
     std::memcpy(tensor.data(), v.data(), sizeof(T) * v.size());
 }
-template <typename U, typename T>
+template <typename D, typename U, typename T>
 inline void quantize_and_fill_tensor(U &&tensor, const std::vector<T> &v)
 {
-    QuantizationInfo     qi = tensor.quantization_info();
-    std::vector<uint8_t> quantized;
+    QuantizationInfo qi = tensor.quantization_info();
+    std::vector<D>   quantized;
     quantized.reserve(v.size());
     for(auto elem : v)
     {
-        quantized.emplace_back(quantize_qasymm8(elem, qi));
+        quantized.emplace_back(Qasymm8QuantizationHelper<D>::quantize(elem, qi));
     }
-    std::memcpy(tensor.data(), quantized.data(), sizeof(uint8_t) * quantized.size());
+    std::memcpy(tensor.data(), quantized.data(), sizeof(D) * quantized.size());
 }
-inline QuantizationInfo qinfo_scaleoffset_from_minmax(const float min, const float max)
+template <typename T>
+inline int calc_qinfo(const float min, const float max, float &scale)
 {
-    int           offset = 0;
-    float         scale  = 0;
-    const uint8_t qmin   = std::numeric_limits<uint8_t>::min();
-    const uint8_t qmax   = std::numeric_limits<uint8_t>::max();
-    const float   f_qmin = qmin;
-    const float   f_qmax = qmax;
+    const auto  qmin   = std::numeric_limits<T>::min();
+    const auto  qmax   = std::numeric_limits<T>::max();
+    const float f_qmin = qmin;
+    const float f_qmax = qmax;
+
+    scale                       = (max - min) / (f_qmax - f_qmin);
+    const float offset_from_min = f_qmin - min / scale;
+    const float offset_from_max = f_qmax - max / scale;
+
+    const float offset_from_min_error = std::abs(f_qmin) + std::abs(min / scale);
+    const float offset_from_max_error = std::abs(f_qmax) + std::abs(max / scale);
+    const float f_offset              = offset_from_min_error < offset_from_max_error ? offset_from_min : offset_from_max;
+    T           tmp_offset;
+    if(f_offset < f_qmin)
+    {
+        tmp_offset = qmin;
+    }
+    else if(f_offset > f_qmax)
+    {
+        tmp_offset = qmax;
+    }
+    else
+    {
+        tmp_offset = static_cast<T>(arm_compute::support::cpp11::round(f_offset));
+    }
+    return static_cast<int>(tmp_offset);
+}
+inline QuantizationInfo qinfo_scaleoffset_from_minmax(DataType data_type, const float min, const float max)
+{
+    int   offset = 0;
+    float scale  = 0;
 
     // Continue only if [min,max] is a valid range and not a point
     if(min != max)
     {
-        scale                       = (max - min) / (f_qmax - f_qmin);
-        const float offset_from_min = f_qmin - min / scale;
-        const float offset_from_max = f_qmax - max / scale;
-
-        const float offset_from_min_error = std::abs(f_qmin) + std::abs(min / scale);
-        const float offset_from_max_error = std::abs(f_qmax) + std::abs(max / scale);
-        const float f_offset              = offset_from_min_error < offset_from_max_error ? offset_from_min : offset_from_max;
-
-        uint8_t uint8_offset = 0;
-        if(f_offset < f_qmin)
+        if(data_type == DataType::QASYMM8_SIGNED)
         {
-            uint8_offset = qmin;
-        }
-        else if(f_offset > f_qmax)
-        {
-            uint8_offset = qmax;
+            offset = calc_qinfo<int8_t>(min, max, scale);
         }
         else
         {
-            uint8_offset = static_cast<uint8_t>(arm_compute::support::cpp11::round(f_offset));
+            offset = calc_qinfo<uint8_t>(min, max, scale);
         }
-        offset = uint8_offset;
     }
     return QuantizationInfo(scale, offset);
 }
@@ -100,9 +112,9 @@
                            const SimpleTensor<float> &expected_output_classes, const SimpleTensor<float> &expected_output_scores, const SimpleTensor<float> &expected_num_detection,
                            AbsoluteTolerance<float> tolerance_boxes = AbsoluteTolerance<float>(0.1f), AbsoluteTolerance<float> tolerance_others = AbsoluteTolerance<float>(0.1f))
 {
-    Tensor box_encoding     = create_tensor<Tensor>(TensorShape(4U, 6U, 1U), data_type, 1, qinfo_scaleoffset_from_minmax(-1.0f, 1.0f));
-    Tensor class_prediction = create_tensor<Tensor>(TensorShape(3U, 6U, 1U), data_type, 1, qinfo_scaleoffset_from_minmax(0.0f, 1.0f));
-    Tensor anchors          = create_tensor<Tensor>(TensorShape(4U, 6U), data_type, 1, qinfo_scaleoffset_from_minmax(0.0f, 100.5f));
+    Tensor box_encoding     = create_tensor<Tensor>(TensorShape(4U, 6U, 1U), data_type, 1, qinfo_scaleoffset_from_minmax(data_type, -1.0f, 1.0f));
+    Tensor class_prediction = create_tensor<Tensor>(TensorShape(3U, 6U, 1U), data_type, 1, qinfo_scaleoffset_from_minmax(data_type, 0.0f, 1.0f));
+    Tensor anchors          = create_tensor<Tensor>(TensorShape(4U, 6U), data_type, 1, qinfo_scaleoffset_from_minmax(data_type, 0.0f, 100.5f));
 
     box_encoding.allocator()->allocate();
     class_prediction.allocator()->allocate();
@@ -137,17 +149,31 @@
     };
 
     // Fill the tensors with random pre-generated values
-    if(data_type == DataType::F32)
+    switch(data_type)
     {
-        fill_tensor(Accessor(box_encoding), box_encoding_vector);
-        fill_tensor(Accessor(class_prediction), class_prediction_vector);
-        fill_tensor(Accessor(anchors), anchors_vector);
-    }
-    else
-    {
-        quantize_and_fill_tensor(Accessor(box_encoding), box_encoding_vector);
-        quantize_and_fill_tensor(Accessor(class_prediction), class_prediction_vector);
-        quantize_and_fill_tensor(Accessor(anchors), anchors_vector);
+        case DataType::F32:
+        {
+            fill_tensor(Accessor(box_encoding), box_encoding_vector);
+            fill_tensor(Accessor(class_prediction), class_prediction_vector);
+            fill_tensor(Accessor(anchors), anchors_vector);
+        }
+        break;
+        case DataType::QASYMM8:
+        {
+            quantize_and_fill_tensor<uint8_t>(Accessor(box_encoding), box_encoding_vector);
+            quantize_and_fill_tensor<uint8_t>(Accessor(class_prediction), class_prediction_vector);
+            quantize_and_fill_tensor<uint8_t>(Accessor(anchors), anchors_vector);
+        }
+        break;
+        case DataType::QASYMM8_SIGNED:
+        {
+            quantize_and_fill_tensor<int8_t>(Accessor(box_encoding), box_encoding_vector);
+            quantize_and_fill_tensor<int8_t>(Accessor(class_prediction), class_prediction_vector);
+            quantize_and_fill_tensor<int8_t>(Accessor(anchors), anchors_vector);
+        }
+        break;
+        default:
+            return;
     }
 
     // Determine the output through the CPP kernel
@@ -189,19 +215,22 @@
                                                 TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::S8), // Unsupported data type
                                                 TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32), // Wrong Detection Info
                                                 TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32), // Wrong boxes dimensions
-                                                TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::QASYMM8)}), // Wrong score dimension 
+                                                TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::QASYMM8), // Wrong score dimension
+                                                TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::QASYMM8_SIGNED)}), // Wrong score dimension
         framework::dataset::make("ClassPredsInfo",{ TensorInfo(TensorShape(3U ,10U), 1, DataType::F32),
                                                 TensorInfo(TensorShape(3U ,10U), 1, DataType::F32),
                                                 TensorInfo(TensorShape(3U ,10U), 1, DataType::F32),
                                                 TensorInfo(TensorShape(3U ,10U), 1, DataType::F32),
                                                 TensorInfo(TensorShape(3U ,10U), 1, DataType::F32),
-                                                TensorInfo(TensorShape(3U ,10U), 1, DataType::QASYMM8)})),
+                                                TensorInfo(TensorShape(3U ,10U), 1, DataType::QASYMM8),
+                                                TensorInfo(TensorShape(3U ,10U), 1, DataType::QASYMM8_SIGNED)})),
         framework::dataset::make("AnchorsInfo",{ TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
                                                 TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
                                                 TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
                                                 TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
                                                 TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
-                                                TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::QASYMM8)})),
+                                                TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::QASYMM8),
+                                                TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::QASYMM8_SIGNED)})),
         framework::dataset::make("OutputBoxInfo", { TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::F32),
                                                 TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::F32),
                                                 TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::S8),
@@ -383,6 +412,75 @@
 
 TEST_SUITE_END() // QASYMM8
 
+TEST_SUITE(QASYMM8_SIGNED)
+TEST_CASE(Quantized_general, framework::DatasetMode::ALL)
+{
+    DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
+                                                                       0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/);
+
+    // Fill expected detection boxes
+    SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
+    fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
+    // Fill expected detection classes
+    SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
+    fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
+    // Fill expected detection scores
+    SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
+    fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
+    // Fill expected num detections
+    SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
+    fill_tensor(expected_num_detection, std::vector<float> { 3.f });
+    // Run test
+    base_test_case(info, DataType::QASYMM8_SIGNED, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection, AbsoluteTolerance<float>(0.3f));
+}
+
+TEST_CASE(Quantized_fast, framework::DatasetMode::ALL)
+{
+    DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
+                                                                       0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/,
+                                                                       false /*use_regular_nms*/, 1 /*detections_per_class*/);
+
+    // Fill expected detection boxes
+    SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
+    fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
+    // Fill expected detection classes
+    SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
+    fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
+    // Fill expected detection scores
+    SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
+    fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
+    // Fill expected num detections
+    SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
+    fill_tensor(expected_num_detection, std::vector<float> { 3.f });
+
+    // Run base test
+    base_test_case(info, DataType::QASYMM8_SIGNED, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection, AbsoluteTolerance<float>(0.3f));
+}
+
+TEST_CASE(Quantized_regular, framework::DatasetMode::ALL)
+{
+    DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
+                                                                       0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/,
+                                                                       true /*use_regular_nms*/, 1 /*detections_per_class*/);
+    // Fill expected detection boxes
+    SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
+    fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, 9.85, 0.95, 10.95, 0.0f, 0.0f, 0.0f, 0.0f });
+    // Fill expected detection classes
+    SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
+    fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
+    // Fill expected detection scores
+    SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
+    fill_tensor(expected_output_scores, std::vector<float> { 0.95f, 0.91f, 0.0f });
+    // Fill expected num detections
+    SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
+    fill_tensor(expected_num_detection, std::vector<float> { 2.f });
+
+    // Run test
+    base_test_case(info, DataType::QASYMM8_SIGNED, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection, AbsoluteTolerance<float>(0.3f));
+}
+
+TEST_SUITE_END() // QASYMM8_SIGNED
+
 TEST_SUITE_END() // DetectionPostProcessLayer
 TEST_SUITE_END() // CPP
 } // namespace validation