/*
 * Copyright (c) 2018-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CPP/functions/CPPDetectionOutputLayer.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Validate.h"
#include "support/ToolchainSupport.h"

#include <list>

namespace arm_compute
{
namespace
{
Status detection_layer_validate_arguments(const ITensorInfo *input_loc, const ITensorInfo *input_conf, const ITensorInfo *input_priorbox, const ITensorInfo *output, DetectionOutputLayerInfo info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input_loc, input_conf, input_priorbox, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input_loc, 1, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_loc, input_conf, input_priorbox);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input_loc->num_dimensions() > 2, "The location input tensor should be [C1, N].");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input_conf->num_dimensions() > 2, "The confidence input tensor should be [C2, N].");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input_priorbox->num_dimensions() > 3, "The priorbox input tensor should be [C3, 2, N].");

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.eta() <= 0.f || info.eta() > 1.f, "Eta should be between 0 and 1");

    const int num_priors = input_priorbox->tensor_shape()[0] / 4;
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(static_cast<size_t>((num_priors * info.num_loc_classes() * 4)) != input_loc->tensor_shape()[0], "Number of priors must match number of location predictions.");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(static_cast<size_t>((num_priors * info.num_classes())) != input_conf->tensor_shape()[0], "Number of priors must match number of confidence predictions.");

    // Validate configured output
    if(output->total_size() != 0)
    {
        const unsigned int max_size = info.keep_top_k() * (input_loc->num_dimensions() > 1 ? input_loc->dimension(1) : 1);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), TensorShape(7U, max_size));
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_loc, output);
    }

    return Status{};
}

/** Function used to sort pair<float, T> in descending order based on the score (first) value.
 */
template <typename T>
bool SortScorePairDescend(const std::pair<float, T> &pair1,
                          const std::pair<float, T> &pair2)
{
    return pair1.first > pair2.first;
}

/** Get location predictions from input_loc.
 *
 * @param[in]  input_loc                The input location prediction.
 * @param[in]  num                      The number of images.
 * @param[in]  num_priors               Number of predictions per class.
 * @param[in]  num_loc_classes          Number of location classes. It is 1 if share_location is true,
 *                                      and is equal to the number of classes to predict otherwise.
 * @param[in]  share_location           If true, all classes share the same location prediction.
 * @param[out] all_location_predictions All the location predictions.
 *
 */
void retrieve_all_loc_predictions(const ITensor *input_loc, const int num,
                                  const int num_priors, const int num_loc_classes,
                                  const bool share_location, std::vector<LabelBBox> &all_location_predictions)
{
    for(int i = 0; i < num; ++i)
    {
        for(int c = 0; c < num_loc_classes; ++c)
        {
            int label = share_location ? -1 : c;
            if(all_location_predictions[i].find(label) == all_location_predictions[i].end())
            {
                all_location_predictions[i][label].resize(num_priors);
            }
            else
            {
                ARM_COMPUTE_ERROR_ON(all_location_predictions[i][label].size() != static_cast<size_t>(num_priors));
                break;
            }
        }
    }
    for(int i = 0; i < num; ++i)
    {
        for(int p = 0; p < num_priors; ++p)
        {
            for(int c = 0; c < num_loc_classes; ++c)
            {
                const int label    = share_location ? -1 : c;
                const int base_ptr = i * num_priors * num_loc_classes * 4 + p * num_loc_classes * 4 + c * 4;
                // xmin, ymin, xmax, ymax
                all_location_predictions[i][label][p][0] = *reinterpret_cast<float *>(input_loc->ptr_to_element(Coordinates(base_ptr)));
                all_location_predictions[i][label][p][1] = *reinterpret_cast<float *>(input_loc->ptr_to_element(Coordinates(base_ptr + 1)));
                all_location_predictions[i][label][p][2] = *reinterpret_cast<float *>(input_loc->ptr_to_element(Coordinates(base_ptr + 2)));
                all_location_predictions[i][label][p][3] = *reinterpret_cast<float *>(input_loc->ptr_to_element(Coordinates(base_ptr + 3)));
            }
        }
    }
}

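// Note on the expected memory layout of input_loc (an assumption documented here for clarity,
// inferred from the indexing in retrieve_all_loc_predictions rather than stated elsewhere): the
// tensor is a flat array of [xmin, ymin, xmax, ymax] groups ordered image-major, then prior, then
// class. For example, with share_location == true (num_loc_classes == 1), the box for image 0,
// prior 1 starts at element 4, and the box for image 1, prior 0 starts at element num_priors * 4.
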
/** Get confidence predictions from input_conf.
 *
 * @param[in]  input_conf            The input confidence prediction.
 * @param[in]  num                   The number of images.
 * @param[in]  num_priors            Number of predictions per class.
 * @param[in]  num_classes           Number of classes to predict.
 * @param[out] all_confidence_scores All the confidence scores, per image and per class.
 *
 */
void retrieve_all_conf_scores(const ITensor *input_conf, const int num,
                              const int num_priors, const int num_classes,
                              std::vector<std::map<int, std::vector<float>>> &all_confidence_scores)
{
    std::vector<float> tmp_buffer;
    tmp_buffer.resize(num * num_priors * num_classes);
    for(int i = 0; i < num; ++i)
    {
        for(int c = 0; c < num_classes; ++c)
        {
            for(int p = 0; p < num_priors; ++p)
            {
                tmp_buffer[i * num_classes * num_priors + c * num_priors + p] =
                    *reinterpret_cast<float *>(input_conf->ptr_to_element(Coordinates(i * num_classes * num_priors + p * num_classes + c)));
            }
        }
    }
    for(int i = 0; i < num; ++i)
    {
        for(int c = 0; c < num_classes; ++c)
        {
            all_confidence_scores[i][c].resize(num_priors);
            all_confidence_scores[i][c].assign(&tmp_buffer[i * num_classes * num_priors + c * num_priors],
                                               &tmp_buffer[i * num_classes * num_priors + c * num_priors + num_priors]);
        }
    }
}

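// Worked example of the reordering above (illustrative values, not taken from a real network):
// with num_classes = 2 and num_priors = 3, input_conf holds scores prior-major with the class
// index fastest, i.e. [p0c0, p0c1, p1c0, p1c1, p2c0, p2c1]. The temporary buffer transposes this
// to class-major with the prior index fastest, so all_confidence_scores[i][0] ends up with the
// contiguous scores [p0c0, p1c0, p2c0] and all_confidence_scores[i][1] with [p0c1, p1c1, p2c1].
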
/** Get prior boxes from input_priorbox.
 *
 * @param[in]  input_priorbox      The input prior boxes.
 * @param[in]  num_priors          Number of priors.
 * @param[out] all_prior_bboxes    All the prior bounding boxes.
 * @param[out] all_prior_variances All the prior variances.
 *
 */
void retrieve_all_priorbox(const ITensor *input_priorbox,
                           const int num_priors,
                           std::vector<NormalizedBBox> &all_prior_bboxes,
                           std::vector<std::array<float, 4>> &all_prior_variances)
{
    for(int i = 0; i < num_priors; ++i)
    {
        all_prior_bboxes[i] =
        {
            {
                *reinterpret_cast<float *>(input_priorbox->ptr_to_element(Coordinates(i * 4))),
                *reinterpret_cast<float *>(input_priorbox->ptr_to_element(Coordinates(i * 4 + 1))),
                *reinterpret_cast<float *>(input_priorbox->ptr_to_element(Coordinates(i * 4 + 2))),
                *reinterpret_cast<float *>(input_priorbox->ptr_to_element(Coordinates(i * 4 + 3)))
            }
        };
    }

    std::array<float, 4> var({ { 0, 0, 0, 0 } });
    for(int i = 0; i < num_priors; ++i)
    {
        for(int j = 0; j < 4; ++j)
        {
            var[j] = *reinterpret_cast<float *>(input_priorbox->ptr_to_element(Coordinates((num_priors + i) * 4 + j)));
        }
        all_prior_variances[i] = var;
    }
}

/** Decode a bbox according to a prior bbox.
 *
 * @param[in]  prior_bbox                 The input prior bounding box.
 * @param[in]  prior_variance             The corresponding input variance.
 * @param[in]  code_type                  The detection output code type used to decode the results.
 * @param[in]  variance_encoded_in_target If true, the variance is encoded in the target.
 * @param[in]  clip_bbox                  If true, the results are clamped to [0.f, 1.f].
 * @param[in]  bbox                       The input bbox to decode.
 * @param[out] decode_bbox                The decoded bbox.
 *
 */
void DecodeBBox(const NormalizedBBox &prior_bbox, const std::array<float, 4> &prior_variance,
                const DetectionOutputLayerCodeType code_type, const bool variance_encoded_in_target,
                const bool clip_bbox, const NormalizedBBox &bbox, NormalizedBBox &decode_bbox)
{
    // If the variance is encoded in the target, we simply need to add the offset predictions,
    // otherwise we need to scale the offset accordingly.
    switch(code_type)
    {
        case DetectionOutputLayerCodeType::CORNER:
        {
            decode_bbox[0] = prior_bbox[0] + (variance_encoded_in_target ? bbox[0] : prior_variance[0] * bbox[0]);
            decode_bbox[1] = prior_bbox[1] + (variance_encoded_in_target ? bbox[1] : prior_variance[1] * bbox[1]);
            decode_bbox[2] = prior_bbox[2] + (variance_encoded_in_target ? bbox[2] : prior_variance[2] * bbox[2]);
            decode_bbox[3] = prior_bbox[3] + (variance_encoded_in_target ? bbox[3] : prior_variance[3] * bbox[3]);

            break;
        }
        case DetectionOutputLayerCodeType::CENTER_SIZE:
        {
            const float prior_width  = prior_bbox[2] - prior_bbox[0];
            const float prior_height = prior_bbox[3] - prior_bbox[1];

            // Check that the prior width and height are greater than 0
            ARM_COMPUTE_ERROR_ON(prior_width <= 0.f);
            ARM_COMPUTE_ERROR_ON(prior_height <= 0.f);

            const float prior_center_x = (prior_bbox[0] + prior_bbox[2]) / 2.;
            const float prior_center_y = (prior_bbox[1] + prior_bbox[3]) / 2.;

            const float decode_bbox_center_x = (variance_encoded_in_target ? bbox[0] : prior_variance[0] * bbox[0]) * prior_width + prior_center_x;
            const float decode_bbox_center_y = (variance_encoded_in_target ? bbox[1] : prior_variance[1] * bbox[1]) * prior_height + prior_center_y;
            const float decode_bbox_width    = (variance_encoded_in_target ? std::exp(bbox[2]) : std::exp(prior_variance[2] * bbox[2])) * prior_width;
            const float decode_bbox_height   = (variance_encoded_in_target ? std::exp(bbox[3]) : std::exp(prior_variance[3] * bbox[3])) * prior_height;

            decode_bbox[0] = (decode_bbox_center_x - decode_bbox_width / 2.f);
            decode_bbox[1] = (decode_bbox_center_y - decode_bbox_height / 2.f);
            decode_bbox[2] = (decode_bbox_center_x + decode_bbox_width / 2.f);
            decode_bbox[3] = (decode_bbox_center_y + decode_bbox_height / 2.f);

            break;
        }
        case DetectionOutputLayerCodeType::CORNER_SIZE:
        {
            const float prior_width  = prior_bbox[2] - prior_bbox[0];
            const float prior_height = prior_bbox[3] - prior_bbox[1];

            // Check that the prior width and height are greater than 0
            ARM_COMPUTE_ERROR_ON(prior_width <= 0.f);
            ARM_COMPUTE_ERROR_ON(prior_height <= 0.f);

            decode_bbox[0] = prior_bbox[0] + (variance_encoded_in_target ? bbox[0] : prior_variance[0] * bbox[0]) * prior_width;
            decode_bbox[1] = prior_bbox[1] + (variance_encoded_in_target ? bbox[1] : prior_variance[1] * bbox[1]) * prior_height;
            decode_bbox[2] = prior_bbox[2] + (variance_encoded_in_target ? bbox[2] : prior_variance[2] * bbox[2]) * prior_width;
            decode_bbox[3] = prior_bbox[3] + (variance_encoded_in_target ? bbox[3] : prior_variance[3] * bbox[3]) * prior_height;

            break;
        }
        default:
            ARM_COMPUTE_ERROR("Unsupported Detection Output Code Type.");
    }

    if(clip_bbox)
    {
        for(auto &d_bbox : decode_bbox)
        {
            d_bbox = utility::clamp(d_bbox, 0.f, 1.f);
        }
    }
}

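// Worked example of the CENTER_SIZE decoding above (illustrative numbers only): with a prior box
// (0.1, 0.1, 0.3, 0.3), prior variance (0.1, 0.1, 0.2, 0.2), variance_encoded_in_target set to
// false and an encoded bbox (1.0, 0.0, 0.0, 0.0):
//   prior width/height = 0.2, prior centre = (0.2, 0.2)
//   decoded centre x   = 0.1 * 1.0 * 0.2 + 0.2 = 0.22, decoded width = exp(0) * 0.2 = 0.2
//   decoded box        = (0.12, 0.1, 0.32, 0.3), i.e. the prior shifted right by 0.02.
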
/** Do non maximum suppression given bboxes and scores.
 *
 * @param[in]  bboxes          The input bounding boxes.
 * @param[in]  scores          The corresponding input confidences.
 * @param[in]  score_threshold The threshold used to filter detection results.
 * @param[in]  nms_threshold   The threshold used in non maximum suppression.
 * @param[in]  eta             Adaptation rate for the nms threshold.
 * @param[in]  top_k           If not -1, keep at most top_k picked indices.
 * @param[out] indices         The kept indices of bboxes after nms.
 *
 */
void ApplyNMSFast(const std::vector<NormalizedBBox> &bboxes,
                  const std::vector<float> &scores, const float score_threshold,
                  const float nms_threshold, const float eta, const int top_k,
                  std::vector<int> &indices)
{
    ARM_COMPUTE_ERROR_ON_MSG(bboxes.size() != scores.size(), "bboxes and scores have different size.");

    // Get top_k scores (with corresponding indices).
    std::list<std::pair<float, int>> score_index_vec;

    // Generate index score pairs.
    for(size_t i = 0; i < scores.size(); ++i)
    {
        if(scores[i] > score_threshold)
        {
            score_index_vec.emplace_back(std::make_pair(scores[i], i));
        }
    }

    // Sort the score pairs according to the scores in descending order
    score_index_vec.sort(SortScorePairDescend<int>);

    // Keep top_k scores if needed.
    const int score_index_vec_size = score_index_vec.size();
    if(top_k > -1 && top_k < score_index_vec_size)
    {
        score_index_vec.resize(top_k);
    }

    // Do NMS.
    float adaptive_threshold = nms_threshold;
    indices.clear();

    while(!score_index_vec.empty())
    {
        const int idx  = score_index_vec.front().second;
        bool      keep = true;
        for(int kept_idx : indices)
        {
            if(keep)
            {
                // Compute the Jaccard (intersection over union, IoU) overlap between two bboxes.
                NormalizedBBox intersect_bbox = std::array<float, 4>({ { 0, 0, 0, 0 } });
                if(bboxes[kept_idx][0] > bboxes[idx][2] || bboxes[kept_idx][2] < bboxes[idx][0] || bboxes[kept_idx][1] > bboxes[idx][3] || bboxes[kept_idx][3] < bboxes[idx][1])
                {
                    intersect_bbox = std::array<float, 4>({ { 0, 0, 0, 0 } });
                }
                else
                {
                    intersect_bbox = std::array<float, 4>({ {
                            std::max(bboxes[idx][0], bboxes[kept_idx][0]),
                            std::max(bboxes[idx][1], bboxes[kept_idx][1]),
                            std::min(bboxes[idx][2], bboxes[kept_idx][2]),
                            std::min(bboxes[idx][3], bboxes[kept_idx][3])
                        }
                    });
                }

                float intersect_width  = intersect_bbox[2] - intersect_bbox[0];
                float intersect_height = intersect_bbox[3] - intersect_bbox[1];

                float overlap = 0.f;
                if(intersect_width > 0 && intersect_height > 0)
                {
                    float intersect_size = intersect_width * intersect_height;
                    float bbox1_size     = (bboxes[idx][2] < bboxes[idx][0]
                                            || bboxes[idx][3] < bboxes[idx][1]) ?
                                           0.f :
                                           (bboxes[idx][2] - bboxes[idx][0]) * (bboxes[idx][3] - bboxes[idx][1]); // BBoxSize(bboxes[idx]);
                    float bbox2_size = (bboxes[kept_idx][2] < bboxes[kept_idx][0]
                                        || bboxes[kept_idx][3] < bboxes[kept_idx][1]) ?
                                       0.f :
                                       (bboxes[kept_idx][2] - bboxes[kept_idx][0]) * (bboxes[kept_idx][3] - bboxes[kept_idx][1]); // BBoxSize(bboxes[kept_idx]);
                    overlap = intersect_size / (bbox1_size + bbox2_size - intersect_size);
                }
                keep = (overlap <= adaptive_threshold);
            }
            else
            {
                break;
            }
        }
        if(keep)
        {
            indices.push_back(idx);
        }
        score_index_vec.erase(score_index_vec.begin());
        if(keep && eta < 1.f && adaptive_threshold > 0.5f)
        {
            adaptive_threshold *= eta;
        }
    }
}

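// Minimal usage sketch of ApplyNMSFast (illustrative values, not compiled as part of this file):
//
//   std::vector<NormalizedBBox> boxes  = { { { 0.10f, 0.10f, 0.50f, 0.50f } },
//                                          { { 0.12f, 0.10f, 0.52f, 0.50f } },
//                                          { { 0.60f, 0.60f, 0.90f, 0.90f } } };
//   std::vector<float>          scores = { 0.9f, 0.8f, 0.7f };
//   std::vector<int>            kept;
//   ApplyNMSFast(boxes, scores, 0.5f /* score_threshold */, 0.5f /* nms_threshold */,
//                1.f /* eta */, -1 /* top_k */, kept);
//
// Box 1 overlaps box 0 with an IoU of roughly 0.9 and is suppressed, so kept ends up as { 0, 2 }.
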
Status non_max_suppression_validate_arguments(const ITensorInfo *bboxes, const ITensorInfo *scores, const ITensorInfo *indices, unsigned int max_output_size,
                                              const float score_threshold, const float nms_threshold)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(bboxes, scores, indices);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bboxes, 1, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(scores, 1, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(indices, 1, DataType::S32);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(bboxes->num_dimensions() > 2, "The bboxes tensor must be a 2-D float tensor of shape [4, num_boxes].");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(scores->num_dimensions() > 1, "The scores tensor must be a 1-D float tensor of shape [num_boxes].");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(indices->num_dimensions() > 1, "The indices must be a 1-D integer tensor of shape [M], where max_output_size <= M.");
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(bboxes, scores);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(indices->dimension(0) == 0, "Indices tensor must be bigger than 0");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(max_output_size == 0, "Max size cannot be 0");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(nms_threshold < 0.f || nms_threshold > 1.f, "Threshold must be in [0,1]");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(score_threshold < 0.f || score_threshold > 1.f, "Threshold must be in [0,1]");

    return Status{};
}
} // namespace

CPPNonMaximumSuppression::CPPNonMaximumSuppression()
    : _bboxes(nullptr), _scores(nullptr), _indices(nullptr), _max_output_size(0), _score_threshold(0.f), _nms_threshold(0.f)
{
}

void CPPNonMaximumSuppression::configure(
    const ITensor *bboxes, const ITensor *scores, ITensor *indices, unsigned int max_output_size,
    const float score_threshold, const float nms_threshold)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(bboxes, scores, indices);
    ARM_COMPUTE_ERROR_THROW_ON(non_max_suppression_validate_arguments(bboxes->info(), scores->info(), indices->info(), max_output_size, score_threshold, nms_threshold));

    // Store the input tensors and the suppression parameters
    _bboxes  = bboxes;
    _scores  = scores;
    _indices = indices;

    _nms_threshold   = nms_threshold;
    _max_output_size = max_output_size;
    _score_threshold = score_threshold;
}

Status CPPNonMaximumSuppression::validate(
    const ITensorInfo *bboxes, const ITensorInfo *scores, const ITensorInfo *indices, unsigned int max_output_size,
    const float score_threshold, const float nms_threshold)
{
    ARM_COMPUTE_RETURN_ON_ERROR(non_max_suppression_validate_arguments(bboxes, scores, indices, max_output_size, score_threshold, nms_threshold));
    return Status{};
}

/** Copy the bounding boxes from the input tensor into a vector of NormalizedBBox. */
void extract_bounding_boxes_from_tensor(const ITensor *bboxes, std::vector<NormalizedBBox> &bboxes_vector)
{
    Window input_win;
    input_win.use_tensor_dimensions(bboxes->info()->tensor_shape());
    input_win.set_dimension_step(0U, 4U);
    input_win.set_dimension_step(1U, 1U);
    Iterator input(bboxes, input_win);
    auto     f = [&bboxes_vector, &input](const Coordinates &)
    {
        const auto input_ptr = reinterpret_cast<const float *>(input.ptr());
        bboxes_vector.push_back(NormalizedBBox({ { *input_ptr, *(input_ptr + 1), *(2 + input_ptr), *(3 + input_ptr) } }));
    };
    execute_window_loop(input_win, f, input);
}

/** Copy the scores from the input tensor into a vector of floats. */
void extract_scores_from_tensor(const ITensor *scores, std::vector<float> &scores_vector)
{
    Window window;
    window.use_tensor_dimensions(scores->info()->tensor_shape());
    Iterator it(scores, window);
    auto     f = [&it, &scores_vector](const Coordinates &)
    {
        const auto input_ptr = reinterpret_cast<const float *>(it.ptr());
        scores_vector.push_back(*input_ptr);
    };
    execute_window_loop(window, f, it);
}

void CPPNonMaximumSuppression::run()
{
    std::vector<NormalizedBBox> bboxes_vector;
    std::vector<float>          scores_vector;
    std::vector<int>            indices_vector;
    extract_bounding_boxes_from_tensor(_bboxes, bboxes_vector);
    extract_scores_from_tensor(_scores, scores_vector);
    ApplyNMSFast(bboxes_vector, scores_vector, _score_threshold, _nms_threshold, 1, -1 /* disable top_k */, indices_vector);
    std::copy_n(indices_vector.begin(), std::min(indices_vector.size(), _indices->info()->dimension(0)), reinterpret_cast<int *>(_indices->ptr_to_element(Coordinates(0))));
}

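// Minimal end-to-end sketch for CPPNonMaximumSuppression (comment only; the tensor setup follows
// the usual arm_compute runtime pattern and num_boxes / max_output_size are placeholders):
//
//   Tensor bboxes, scores, indices;
//   bboxes.allocator()->init(TensorInfo(TensorShape(4U, num_boxes), 1, DataType::F32));
//   scores.allocator()->init(TensorInfo(TensorShape(num_boxes), 1, DataType::F32));
//   indices.allocator()->init(TensorInfo(TensorShape(max_output_size), 1, DataType::S32));
//
//   CPPNonMaximumSuppression nms;
//   nms.configure(&bboxes, &scores, &indices, max_output_size, 0.4f /* score_threshold */, 0.5f /* nms_threshold */);
//
//   bboxes.allocator()->allocate();
//   scores.allocator()->allocate();
//   indices.allocator()->allocate();
//   // ... fill bboxes and scores, then:
//   nms.run();
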
CPPDetectionOutputLayer::CPPDetectionOutputLayer()
    : _input_loc(nullptr), _input_conf(nullptr), _input_priorbox(nullptr), _output(nullptr), _info(), _num_priors(), _num(), _all_location_predictions(), _all_confidence_scores(), _all_prior_bboxes(),
      _all_prior_variances(), _all_decode_bboxes(), _all_indices()
{
}

void CPPDetectionOutputLayer::configure(const ITensor *input_loc, const ITensor *input_conf, const ITensor *input_priorbox, ITensor *output, DetectionOutputLayerInfo info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input_loc, input_conf, input_priorbox, output);
    // Output auto initialization if not yet initialized
    // Since the number of bboxes to be kept is unknown before NMS, the shape is set to the maximum
    // The maximum is keep_top_k * input_loc_size[1]
    // Each row is a 7 dimension std::vector, which stores [image_id, label, confidence, xmin, ymin, xmax, ymax]
    const unsigned int max_size = info.keep_top_k() * (input_loc->info()->num_dimensions() > 1 ? input_loc->info()->dimension(1) : 1);
    auto_init_if_empty(*output->info(), input_loc->info()->clone()->set_tensor_shape(TensorShape(7U, max_size)));

    // Perform validation step
    ARM_COMPUTE_ERROR_THROW_ON(detection_layer_validate_arguments(input_loc->info(), input_conf->info(), input_priorbox->info(), output->info(), info));

    _input_loc      = input_loc;
    _input_conf     = input_conf;
    _input_priorbox = input_priorbox;
    _output         = output;
    _info           = info;
    _num_priors     = input_priorbox->info()->dimension(0) / 4;
    _num            = (_input_loc->info()->num_dimensions() > 1 ? _input_loc->info()->dimension(1) : 1);

    _all_location_predictions.resize(_num);
    _all_confidence_scores.resize(_num);
    _all_prior_bboxes.resize(_num_priors);
    _all_prior_variances.resize(_num_priors);
    _all_decode_bboxes.resize(_num);

    for(int i = 0; i < _num; ++i)
    {
        for(int c = 0; c < _info.num_loc_classes(); ++c)
        {
            const int label = _info.share_location() ? -1 : c;
            if(label == _info.background_label_id())
            {
                // Ignore background class.
                continue;
            }
            _all_decode_bboxes[i][label].resize(_num_priors);
        }
    }
    _all_indices.resize(_num);

    Coordinates coord;
    coord.set_num_dimensions(output->info()->num_dimensions());
    output->info()->set_valid_region(ValidRegion(coord, output->info()->tensor_shape()));
}

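// Minimal configuration sketch for CPPDetectionOutputLayer (comment only; the shapes follow the
// checks in detection_layer_validate_arguments, num_priors / num_classes / batch are placeholders,
// and the DetectionOutputLayerInfo values must come from the network in use):
//
//   Tensor loc, conf, priorbox, detections;
//   loc.allocator()->init(TensorInfo(TensorShape(num_priors * num_loc_classes * 4, batch), 1, DataType::F32));
//   conf.allocator()->init(TensorInfo(TensorShape(num_priors * num_classes, batch), 1, DataType::F32));
//   priorbox.allocator()->init(TensorInfo(TensorShape(num_priors * 4, 2), 1, DataType::F32));
//
//   DetectionOutputLayerInfo info(/* num_classes, share_location, code_type, keep_top_k, ... see Types.h */);
//   CPPDetectionOutputLayer detection_output;
//   detection_output.configure(&loc, &conf, &priorbox, &detections, info);
//   // detections is auto-initialised to the maximum shape [7, keep_top_k * batch];
//   // allocate the tensors, fill the inputs and call detection_output.run().
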
Status CPPDetectionOutputLayer::validate(const ITensorInfo *input_loc, const ITensorInfo *input_conf, const ITensorInfo *input_priorbox, const ITensorInfo *output, DetectionOutputLayerInfo info)
{
    ARM_COMPUTE_RETURN_ON_ERROR(detection_layer_validate_arguments(input_loc, input_conf, input_priorbox, output, info));
    return Status{};
}

void CPPDetectionOutputLayer::run()
{
    // Retrieve all location predictions.
    retrieve_all_loc_predictions(_input_loc, _num, _num_priors, _info.num_loc_classes(), _info.share_location(), _all_location_predictions);

    // Retrieve all confidences.
    retrieve_all_conf_scores(_input_conf, _num, _num_priors, _info.num_classes(), _all_confidence_scores);

    // Retrieve all prior bboxes.
    retrieve_all_priorbox(_input_priorbox, _num_priors, _all_prior_bboxes, _all_prior_variances);

    // Decode all location predictions to bboxes
    const bool clip_bbox = false;
    for(int i = 0; i < _num; ++i)
    {
        for(int c = 0; c < _info.num_loc_classes(); ++c)
        {
            const int label = _info.share_location() ? -1 : c;
            if(label == _info.background_label_id())
            {
                // Ignore background class.
                continue;
            }
            ARM_COMPUTE_ERROR_ON_MSG(_all_location_predictions[i].find(label) == _all_location_predictions[i].end(), "Could not find location predictions for label %d.", label);

            const std::vector<NormalizedBBox> &label_loc_preds = _all_location_predictions[i].find(label)->second;

            const int num_bboxes = _all_prior_bboxes.size();
            ARM_COMPUTE_ERROR_ON(_all_prior_variances[i].size() != 4);

            for(int j = 0; j < num_bboxes; ++j)
            {
                DecodeBBox(_all_prior_bboxes[j], _all_prior_variances[j], _info.code_type(), _info.variance_encoded_in_target(), clip_bbox, label_loc_preds[j], _all_decode_bboxes[i][label][j]);
            }
        }
    }

    int num_kept = 0;

    for(int i = 0; i < _num; ++i)
    {
        const LabelBBox                         &decode_bboxes = _all_decode_bboxes[i];
        const std::map<int, std::vector<float>> &conf_scores   = _all_confidence_scores[i];

        std::map<int, std::vector<int>> indices;
        int num_det = 0;
        for(int c = 0; c < _info.num_classes(); ++c)
        {
            if(c == _info.background_label_id())
            {
                // Ignore background class
                continue;
            }
            const int label = _info.share_location() ? -1 : c;
            if(conf_scores.find(c) == conf_scores.end() || decode_bboxes.find(label) == decode_bboxes.end())
            {
                ARM_COMPUTE_ERROR("Could not find predictions for label %d.", label);
            }
            const std::vector<float>          &scores = conf_scores.find(c)->second;
            const std::vector<NormalizedBBox> &bboxes = decode_bboxes.find(label)->second;

            ApplyNMSFast(bboxes, scores, _info.confidence_threshold(), _info.nms_threshold(), _info.eta(), _info.top_k(), indices[c]);

            num_det += indices[c].size();
        }

        int num_to_add = 0;
        if(_info.keep_top_k() > -1 && num_det > _info.keep_top_k())
        {
            std::vector<std::pair<float, std::pair<int, int>>> score_index_pairs;
            for(auto it : indices)
            {
                const int               label         = it.first;
                const std::vector<int> &label_indices = it.second;

                if(conf_scores.find(label) == conf_scores.end())
                {
                    ARM_COMPUTE_ERROR("Could not find predictions for label %d.", label);
                }

                const std::vector<float> &scores = conf_scores.find(label)->second;
                for(auto idx : label_indices)
                {
                    ARM_COMPUTE_ERROR_ON(idx > static_cast<int>(scores.size()));
                    score_index_pairs.push_back(std::make_pair(scores[idx], std::make_pair(label, idx)));
                }
            }

            // Keep top k results per image.
            std::sort(score_index_pairs.begin(), score_index_pairs.end(), SortScorePairDescend<std::pair<int, int>>);
            score_index_pairs.resize(_info.keep_top_k());

            // Store the new indices.
            std::map<int, std::vector<int>> new_indices;
            for(auto score_index_pair : score_index_pairs)
            {
                int label = score_index_pair.second.first;
                int idx   = score_index_pair.second.second;
                new_indices[label].push_back(idx);
            }
            _all_indices[i] = new_indices;
            num_to_add      = _info.keep_top_k();
        }
        else
        {
            _all_indices[i] = indices;
            num_to_add      = num_det;
        }
        num_kept += num_to_add;
    }

    // Update the valid region of the output to mark the exact number of detections
    _output->info()->set_valid_region(ValidRegion(Coordinates(0, 0), TensorShape(7, num_kept)));

    int count = 0;
    for(int i = 0; i < _num; ++i)
    {
        const std::map<int, std::vector<float>> &conf_scores   = _all_confidence_scores[i];
        const LabelBBox                         &decode_bboxes = _all_decode_bboxes[i];
        for(auto &it : _all_indices[i])
        {
            const int                 label     = it.first;
            const std::vector<float> &scores    = conf_scores.find(label)->second;
            const int                 loc_label = _info.share_location() ? -1 : label;
            if(conf_scores.find(label) == conf_scores.end() || decode_bboxes.find(loc_label) == decode_bboxes.end())
            {
                // Either there are no confidence predictions
                // or there are no location predictions for the current label.
                ARM_COMPUTE_ERROR("Could not find predictions for the label %d.", label);
            }
            const std::vector<NormalizedBBox> &bboxes  = decode_bboxes.find(loc_label)->second;
            const std::vector<int>            &indices = it.second;

            for(auto idx : indices)
            {
                *(reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(count * 7))))     = i;
                *(reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(count * 7 + 1)))) = label;
                *(reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(count * 7 + 2)))) = scores[idx];
                *(reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(count * 7 + 3)))) = bboxes[idx][0];
                *(reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(count * 7 + 4)))) = bboxes[idx][1];
                *(reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(count * 7 + 5)))) = bboxes[idx][2];
                *(reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(count * 7 + 6)))) = bboxes[idx][3];

                ++count;
            }
        }
    }
}
} // namespace arm_compute