/*
 * Copyright (c) 2019-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CPP/functions/CPPDetectionPostProcessLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "support/ToolchainSupport.h" // arm_compute::support::cpp11::round
#include "tests/NEON/Accessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"

#include <cmath>   // std::abs
#include <cstring> // std::memcpy
#include <limits>
#include <vector>

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
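// Copy raw values straight into an already-allocated tensor buffer.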
template <typename U, typename T>
inline void fill_tensor(U &&tensor, const std::vector<T> &v)
{
    std::memcpy(tensor.data(), v.data(), sizeof(T) * v.size());
}
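// Quantize float values with the tensor's own QuantizationInfo before copying them in;
// D selects the quantized storage type (uint8_t or int8_t).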
template <typename D, typename U, typename T>
inline void quantize_and_fill_tensor(U &&tensor, const std::vector<T> &v)
{
    QuantizationInfo qi = tensor.quantization_info();
    std::vector<D>   quantized;
    quantized.reserve(v.size());
    for(auto elem : v)
    {
        quantized.emplace_back(Qasymm8QuantizationHelper<D>::quantize(elem, qi));
    }
    std::memcpy(tensor.data(), quantized.data(), sizeof(D) * quantized.size());
}
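// Compute the asymmetric quantization offset (zero point) for a [min, max] range, writing the
// scale through the out-parameter and clamping the offset to the representable range of T.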
template <typename T>
inline int calc_qinfo(const float min, const float max, float &scale)
{
    const auto  qmin   = std::numeric_limits<T>::min();
    const auto  qmax   = std::numeric_limits<T>::max();
    const float f_qmin = qmin;
    const float f_qmax = qmax;

    scale                       = (max - min) / (f_qmax - f_qmin);
    const float offset_from_min = f_qmin - min / scale;
    const float offset_from_max = f_qmax - max / scale;

    const float offset_from_min_error = std::abs(f_qmin) + std::abs(min / scale);
    const float offset_from_max_error = std::abs(f_qmax) + std::abs(max / scale);
    const float f_offset              = offset_from_min_error < offset_from_max_error ? offset_from_min : offset_from_max;
    T           tmp_offset;
    if(f_offset < f_qmin)
    {
        tmp_offset = qmin;
    }
    else if(f_offset > f_qmax)
    {
        tmp_offset = qmax;
    }
    else
    {
        tmp_offset = static_cast<T>(arm_compute::support::cpp11::round(f_offset));
    }
    return static_cast<int>(tmp_offset);
}
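// Build a QuantizationInfo for the requested data type; a degenerate range (min == max)
// yields scale = 0 and offset = 0.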
inline QuantizationInfo qinfo_scaleoffset_from_minmax(DataType data_type, const float min, const float max)
{
    int   offset = 0;
    float scale  = 0;

    // Continue only if [min,max] is a valid range and not a point
    if(min != max)
    {
        if(data_type == DataType::QASYMM8_SIGNED)
        {
            offset = calc_qinfo<int8_t>(min, max, scale);
        }
        else
        {
            offset = calc_qinfo<uint8_t>(min, max, scale);
        }
    }
    return QuantizationInfo(scale, offset);
}

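// Run CPPDetectionPostProcessLayer on a fixed 6-box / 3-class input (filled as F32, QASYMM8 or
// QASYMM8_SIGNED) and validate the output boxes, classes, scores and detection count against the
// expected reference tensors within the given tolerances.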
inline void base_test_case(DetectionPostProcessLayerInfo info, DataType data_type, const SimpleTensor<float> &expected_output_boxes,
                           const SimpleTensor<float> &expected_output_classes, const SimpleTensor<float> &expected_output_scores, const SimpleTensor<float> &expected_num_detection,
                           AbsoluteTolerance<float> tolerance_boxes = AbsoluteTolerance<float>(0.1f), AbsoluteTolerance<float> tolerance_others = AbsoluteTolerance<float>(0.1f))
{
    Tensor box_encoding     = create_tensor<Tensor>(TensorShape(4U, 6U, 1U), data_type, 1, qinfo_scaleoffset_from_minmax(data_type, -1.0f, 1.0f));
    Tensor class_prediction = create_tensor<Tensor>(TensorShape(3U, 6U, 1U), data_type, 1, qinfo_scaleoffset_from_minmax(data_type, 0.0f, 1.0f));
    Tensor anchors          = create_tensor<Tensor>(TensorShape(4U, 6U), data_type, 1, qinfo_scaleoffset_from_minmax(data_type, 0.0f, 100.5f));

    box_encoding.allocator()->allocate();
    class_prediction.allocator()->allocate();
    anchors.allocator()->allocate();

    std::vector<float> box_encoding_vector =
    {
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, -1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f
    };
    std::vector<float> class_prediction_vector =
    {
        0.0f, 0.7f, 0.68f,
        0.0f, 0.6f, 0.5f,
        0.0f, 0.9f, 0.83f,
        0.0f, 0.91f, 0.97f,
        0.0f, 0.5f, 0.4f,
        0.0f, 0.31f, 0.22f
    };
    std::vector<float> anchors_vector =
    {
        0.4f, 0.4f, 1.1f, 1.1f,
        0.4f, 0.4f, 1.1f, 1.1f,
        0.4f, 0.4f, 1.1f, 1.1f,
        0.4f, 10.4f, 1.1f, 1.1f,
        0.4f, 10.4f, 1.1f, 1.1f,
        0.4f, 100.4f, 1.1f, 1.1f
    };

    // Fill the tensors with the pre-generated input values
    switch(data_type)
    {
        case DataType::F32:
        {
            fill_tensor(Accessor(box_encoding), box_encoding_vector);
            fill_tensor(Accessor(class_prediction), class_prediction_vector);
            fill_tensor(Accessor(anchors), anchors_vector);
        }
        break;
        case DataType::QASYMM8:
        {
            quantize_and_fill_tensor<uint8_t>(Accessor(box_encoding), box_encoding_vector);
            quantize_and_fill_tensor<uint8_t>(Accessor(class_prediction), class_prediction_vector);
            quantize_and_fill_tensor<uint8_t>(Accessor(anchors), anchors_vector);
        }
        break;
        case DataType::QASYMM8_SIGNED:
        {
            quantize_and_fill_tensor<int8_t>(Accessor(box_encoding), box_encoding_vector);
            quantize_and_fill_tensor<int8_t>(Accessor(class_prediction), class_prediction_vector);
            quantize_and_fill_tensor<int8_t>(Accessor(anchors), anchors_vector);
        }
        break;
        default:
            return;
    }

    // Determine the output through the CPP kernel
    Tensor                       output_boxes;
    Tensor                       output_classes;
    Tensor                       output_scores;
    Tensor                       num_detection;
    CPPDetectionPostProcessLayer detection;
    detection.configure(&box_encoding, &class_prediction, &anchors, &output_boxes, &output_classes, &output_scores, &num_detection, info);

    output_boxes.allocator()->allocate();
    output_classes.allocator()->allocate();
    output_scores.allocator()->allocate();
    num_detection.allocator()->allocate();

    // Run the kernel
    detection.run();

    // Validate against the expected output
    // Validate output boxes
    validate(Accessor(output_boxes), expected_output_boxes, tolerance_boxes);
    // Validate detection classes
    validate(Accessor(output_classes), expected_output_classes, tolerance_others);
    // Validate detection scores
    validate(Accessor(output_scores), expected_output_scores, tolerance_others);
    // Validate num detections
    validate(Accessor(num_detection), expected_num_detection, tolerance_others);
}
} // namespace

TEST_SUITE(CPP)
TEST_SUITE(DetectionPostProcessLayer)

// *INDENT-OFF*
// clang-format off
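// Each row below pairs input/output tensor infos and a DetectionPostProcessLayerInfo with the
// expected result of CPPDetectionPostProcessLayer::validate(); the per-row comments describe why
// validation is expected to fail.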
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(zip(
               framework::dataset::make("BoxEncodingsInfo", { TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
                                                              TensorInfo(TensorShape(4U, 10U, 3U), 1, DataType::F32), // Mismatching batch_size
                                                              TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::S8),  // Unsupported data type
                                                              TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32), // Wrong Detection Info
                                                              TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32), // Wrong boxes dimensions
                                                              TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::QASYMM8),         // Wrong score dimension
                                                              TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::QASYMM8_SIGNED)}), // Wrong score dimension
               framework::dataset::make("ClassPredsInfo", { TensorInfo(TensorShape(3U, 10U), 1, DataType::F32),
                                                            TensorInfo(TensorShape(3U, 10U), 1, DataType::F32),
                                                            TensorInfo(TensorShape(3U, 10U), 1, DataType::F32),
                                                            TensorInfo(TensorShape(3U, 10U), 1, DataType::F32),
                                                            TensorInfo(TensorShape(3U, 10U), 1, DataType::F32),
                                                            TensorInfo(TensorShape(3U, 10U), 1, DataType::QASYMM8),
                                                            TensorInfo(TensorShape(3U, 10U), 1, DataType::QASYMM8_SIGNED)})),
               framework::dataset::make("AnchorsInfo", { TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
                                                         TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
                                                         TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
                                                         TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
                                                         TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
                                                         TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::QASYMM8),
                                                         TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::QASYMM8_SIGNED)})),
               framework::dataset::make("OutputBoxInfo", { TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::F32),
                                                           TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::F32),
                                                           TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::S8),
                                                           TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::F32),
                                                           TensorInfo(TensorShape(1U, 5U, 1U), 1, DataType::F32),
                                                           TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::F32),
                                                           TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::F32)})),
               framework::dataset::make("OutputClassesInfo", { TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
                                                               TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
                                                               TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
                                                               TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
                                                               TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
                                                               TensorInfo(TensorShape(6U, 1U), 1, DataType::F32),
                                                               TensorInfo(TensorShape(6U, 1U), 1, DataType::F32)})),
               framework::dataset::make("OutputScoresInfo", { TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
                                                              TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
                                                              TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
                                                              TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
                                                              TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
                                                              TensorInfo(TensorShape(6U, 1U), 1, DataType::F32),
                                                              TensorInfo(TensorShape(6U, 1U), 1, DataType::F32)})),
               framework::dataset::make("NumDetectionsInfo", { TensorInfo(TensorShape(1U), 1, DataType::F32),
                                                               TensorInfo(TensorShape(1U), 1, DataType::F32),
                                                               TensorInfo(TensorShape(1U), 1, DataType::F32),
                                                               TensorInfo(TensorShape(1U), 1, DataType::F32),
                                                               TensorInfo(TensorShape(1U), 1, DataType::F32),
                                                               TensorInfo(TensorShape(1U), 1, DataType::F32),
                                                               TensorInfo(TensorShape(1U), 1, DataType::F32)})),
               framework::dataset::make("DetectionPostProcessLayerInfo", { DetectionPostProcessLayerInfo(3, 1, 0.0f, 0.5f, 2, {0.1f, 0.1f, 0.1f, 0.1f}),
                                                                           DetectionPostProcessLayerInfo(3, 1, 0.0f, 0.5f, 2, {0.1f, 0.1f, 0.1f, 0.1f}),
                                                                           DetectionPostProcessLayerInfo(3, 1, 0.0f, 0.5f, 2, {0.1f, 0.1f, 0.1f, 0.1f}),
                                                                           DetectionPostProcessLayerInfo(3, 1, 0.0f, 1.5f, 2, {0.0f, 0.1f, 0.1f, 0.1f}),
                                                                           DetectionPostProcessLayerInfo(3, 1, 0.0f, 0.5f, 2, {0.1f, 0.1f, 0.1f, 0.1f}),
                                                                           DetectionPostProcessLayerInfo(3, 1, 0.0f, 0.5f, 2, {0.1f, 0.1f, 0.1f, 0.1f}),
                                                                           DetectionPostProcessLayerInfo(3, 1, 0.0f, 0.5f, 2, {0.1f, 0.1f, 0.1f, 0.1f})})),
               // Every dataset needs one entry per configuration above so that the QASYMM8_SIGNED row is exercised as well
               framework::dataset::make("Expected", { true, false, false, false, false, false, false })),
               box_encodings_info, classes_info, anchors_info, output_boxes_info, output_classes_info, output_scores_info, num_detection_info, detect_info, expected)
{
    const Status status = CPPDetectionPostProcessLayer::validate(&box_encodings_info.clone()->set_is_resizable(false),
                                                                  &classes_info.clone()->set_is_resizable(false),
                                                                  &anchors_info.clone()->set_is_resizable(false),
                                                                  &output_boxes_info.clone()->set_is_resizable(false),
                                                                  &output_classes_info.clone()->set_is_resizable(false),
                                                                  &output_scores_info.clone()->set_is_resizable(false), &num_detection_info.clone()->set_is_resizable(false), detect_info);
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

TEST_SUITE(F32)
TEST_CASE(Float_general, framework::DatasetMode::ALL)
{
    DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
                                                                       0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/);
    // Fill expected detection boxes
    SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
    fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
    // Fill expected detection classes
    SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
    // Fill expected detection scores
    SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
    // Fill expected num detections
    SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
    fill_tensor(expected_num_detection, std::vector<float> { 3.f });
    // Run base test
    base_test_case(info, DataType::F32, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection);
}

TEST_CASE(Float_fast, framework::DatasetMode::ALL)
{
    DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
                                                                       0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/,
                                                                       false /*use_regular_nms*/, 1 /*detections_per_class*/);

    // Fill expected detection boxes
    SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
    fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
    // Fill expected detection classes
    SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
    // Fill expected detection scores
    SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
    // Fill expected num detections
    SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
    fill_tensor(expected_num_detection, std::vector<float> { 3.f });

    // Run base test
    base_test_case(info, DataType::F32, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection);
}

TEST_CASE(Float_regular, framework::DatasetMode::ALL)
{
    DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
                                                                       0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/,
                                                                       true /*use_regular_nms*/, 1 /*detections_per_class*/);

    // Fill expected detection boxes
    SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
    fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, 9.85, 0.95, 10.95, 0.0f, 0.0f, 0.0f, 0.0f });
    // Fill expected detection classes
    SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
    // Fill expected detection scores
    SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.91f, 0.0f });
    // Fill expected num detections
    SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
    fill_tensor(expected_num_detection, std::vector<float> { 2.f });

    // Run test
    base_test_case(info, DataType::F32, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection);
}
TEST_SUITE_END() // F32

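// The quantized suites reuse the float reference outputs; the wider box tolerance (0.3f) absorbs quantization error.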
TEST_SUITE(QASYMM8)
TEST_CASE(Quantized_general, framework::DatasetMode::ALL)
{
    DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
                                                                       0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/);

    // Fill expected detection boxes
    SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
    fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
    // Fill expected detection classes
    SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
    // Fill expected detection scores
    SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
    // Fill expected num detections
    SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
    fill_tensor(expected_num_detection, std::vector<float> { 3.f });
    // Run test
    base_test_case(info, DataType::QASYMM8, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection, AbsoluteTolerance<float>(0.3f));
}

TEST_CASE(Quantized_fast, framework::DatasetMode::ALL)
{
    DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
                                                                       0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/,
                                                                       false /*use_regular_nms*/, 1 /*detections_per_class*/);

    // Fill expected detection boxes
    SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
    fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
    // Fill expected detection classes
    SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
    // Fill expected detection scores
    SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
    // Fill expected num detections
    SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
    fill_tensor(expected_num_detection, std::vector<float> { 3.f });

    // Run base test
    base_test_case(info, DataType::QASYMM8, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection, AbsoluteTolerance<float>(0.3f));
}

TEST_CASE(Quantized_regular, framework::DatasetMode::ALL)
{
    DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
                                                                       0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/,
                                                                       true /*use_regular_nms*/, 1 /*detections_per_class*/);
    // Fill expected detection boxes
    SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
    fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, 9.85, 0.95, 10.95, 0.0f, 0.0f, 0.0f, 0.0f });
    // Fill expected detection classes
    SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
    // Fill expected detection scores
    SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_scores, std::vector<float> { 0.95f, 0.91f, 0.0f });
    // Fill expected num detections
    SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
    fill_tensor(expected_num_detection, std::vector<float> { 2.f });

    // Run test
    base_test_case(info, DataType::QASYMM8, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection, AbsoluteTolerance<float>(0.3f));
}

TEST_SUITE_END() // QASYMM8

TEST_SUITE(QASYMM8_SIGNED)
TEST_CASE(Quantized_general, framework::DatasetMode::ALL)
{
    DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
                                                                       0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/);

    // Fill expected detection boxes
    SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
    fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
    // Fill expected detection classes
    SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
    // Fill expected detection scores
    SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
    // Fill expected num detections
    SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
    fill_tensor(expected_num_detection, std::vector<float> { 3.f });
    // Run test
    base_test_case(info, DataType::QASYMM8_SIGNED, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection, AbsoluteTolerance<float>(0.3f));
}

TEST_CASE(Quantized_fast, framework::DatasetMode::ALL)
{
    DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
                                                                       0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/,
                                                                       false /*use_regular_nms*/, 1 /*detections_per_class*/);

    // Fill expected detection boxes
    SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
    fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
    // Fill expected detection classes
    SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
    // Fill expected detection scores
    SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
    // Fill expected num detections
    SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
    fill_tensor(expected_num_detection, std::vector<float> { 3.f });

    // Run base test
    base_test_case(info, DataType::QASYMM8_SIGNED, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection, AbsoluteTolerance<float>(0.3f));
}

TEST_CASE(Quantized_regular, framework::DatasetMode::ALL)
{
    DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
                                                                       0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/,
                                                                       true /*use_regular_nms*/, 1 /*detections_per_class*/);
    // Fill expected detection boxes
    SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
    fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, 9.85, 0.95, 10.95, 0.0f, 0.0f, 0.0f, 0.0f });
    // Fill expected detection classes
    SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
    // Fill expected detection scores
    SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_scores, std::vector<float> { 0.95f, 0.91f, 0.0f });
    // Fill expected num detections
    SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
    fill_tensor(expected_num_detection, std::vector<float> { 2.f });

    // Run test
    base_test_case(info, DataType::QASYMM8_SIGNED, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection, AbsoluteTolerance<float>(0.3f));
}

TEST_SUITE_END() // QASYMM8_SIGNED

TEST_SUITE_END() // DetectionPostProcessLayer
TEST_SUITE_END() // CPP
} // namespace validation
} // namespace test
} // namespace arm_compute