/*
 * Copyright (c) 2017-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24#include "arm_compute/runtime/CL/functions/CLHOGMultiDetection.h"
25
26#include "arm_compute/core/CL/OpenCL.h"
27#include "arm_compute/core/Error.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010028#include "arm_compute/core/TensorInfo.h"
29#include "arm_compute/runtime/CL/CLArray.h"
30#include "arm_compute/runtime/CL/CLScheduler.h"
31#include "arm_compute/runtime/CL/CLTensor.h"
Moritz Pflanzerc186b572017-09-07 09:48:04 +010032#include "arm_compute/runtime/Scheduler.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010033
34using namespace arm_compute;
35
// Default-constructs all kernels/tensors; the memory manager (may be nullptr)
// is handed to the memory group that manages the intermediate buffers.
// All counters start at 0 and are filled in by configure().
CLHOGMultiDetection::CLHOGMultiDetection(std::shared_ptr<IMemoryManager> memory_manager) // NOLINT
    : _memory_group(std::move(memory_manager)),
      _gradient_kernel(),
      _orient_bin_kernel(),
      _block_norm_kernel(),
      _hog_detect_kernel(),
      _non_maxima_kernel(),
      _hog_space(),
      _hog_norm_space(),
      _detection_windows(), // non-owning; set by configure(), checked by run()
      _mag(),
      _phase(),
      _non_maxima_suppression(false),
      _num_orient_bin_kernel(0),
      _num_block_norm_kernel(0),
      _num_hog_detect_kernel(0)
{
}
54
55void CLHOGMultiDetection::configure(ICLTensor *input, const ICLMultiHOG *multi_hog, ICLDetectionWindowArray *detection_windows, ICLSize2DArray *detection_window_strides, BorderMode border_mode,
56 uint8_t constant_border_value, float threshold, bool non_maxima_suppression, float min_distance)
57{
Manuel Bottini2b84be52020-04-08 10:15:51 +010058 configure(CLKernelLibrary::get().get_compile_context(), input, multi_hog, detection_windows, detection_window_strides, border_mode, constant_border_value, threshold, non_maxima_suppression,
59 min_distance);
60}
61
// Configures the full multi-HOG detection pipeline:
// gradient -> orientation binning -> block normalization -> per-model HOG
// detection -> (optional) non-maxima suppression.
//
// Redundant orientation-binning / block-normalization passes are deduplicated
// across models that share the same cell/bin/block parameters (multi_hog is
// assumed sorted by those parameters — see the comment block below).
//
// @param compile_context        OpenCL compile context used to build the kernels.
// @param input                  U8 input image (validated below).
// @param multi_hog              Collection of HOG models, sorted so equal parameters are adjacent.
// @param detection_windows      Output array; stored and cleared on every run().
// @param detection_window_strides One stride per model (count is validated below).
// @param border_mode / constant_border_value  Border handling for the gradient kernel.
// @param threshold              Detection score threshold passed to each detector.
// @param non_maxima_suppression If true, run() applies CPU non-maxima suppression on the windows.
// @param min_distance           Minimum distance parameter of the non-maxima suppression kernel.
void CLHOGMultiDetection::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLMultiHOG *multi_hog, ICLDetectionWindowArray *detection_windows,
                                    ICLSize2DArray *detection_window_strides, BorderMode border_mode,
                                    uint8_t constant_border_value, float threshold, bool non_maxima_suppression, float min_distance)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
    ARM_COMPUTE_ERROR_ON_INVALID_MULTI_HOG(multi_hog);
    ARM_COMPUTE_ERROR_ON(nullptr == detection_windows);
    ARM_COMPUTE_ERROR_ON(detection_window_strides->num_values() != multi_hog->num_models());

    const size_t       width      = input->info()->dimension(Window::DimX);
    const size_t       height     = input->info()->dimension(Window::DimY);
    const TensorShape &shape_img  = input->info()->tensor_shape();
    const size_t       num_models = multi_hog->num_models();
    // Phase type of model 0 is used for the shared gradient kernel
    // (all models presumably agree — enforced by ARM_COMPUTE_ERROR_ON_INVALID_MULTI_HOG above; TODO confirm).
    PhaseType phase_type = multi_hog->model(0)->info()->phase_type();

    // Parameters of the previously visited model, used to detect parameter changes below
    size_t prev_num_bins     = multi_hog->model(0)->info()->num_bins();
    Size2D prev_cell_size    = multi_hog->model(0)->info()->cell_size();
    Size2D prev_block_size   = multi_hog->model(0)->info()->block_size();
    Size2D prev_block_stride = multi_hog->model(0)->info()->block_stride();

    /* Check if CLHOGOrientationBinningKernel and CLHOGBlockNormalizationKernel kernels can be skipped for a specific HOG data-object
     *
     * 1) CLHOGOrientationBinningKernel and CLHOGBlockNormalizationKernel are skipped if the cell size and the number of bins don't change.
     *    Since "multi_hog" is sorted, it is enough to check the HOG descriptors at level "ith" and level "(i-1)th"
     * 2) CLHOGBlockNormalizationKernel is skipped if the cell size, the number of bins and block size do not change.
     *    Since "multi_hog" is sorted, it is enough to check the HOG descriptors at level "ith" and level "(i-1)th"
     *
     * @note Since the orientation binning and block normalization kernels can be skipped, we need to keep track of the input to process for each kernel
     *       with "input_orient_bin", "input_hog_detect" and "input_block_norm"
     */
    std::vector<size_t> input_orient_bin;                   // model index feeding each orientation-binning kernel
    std::vector<size_t> input_hog_detect;                   // block-norm output index feeding each detector
    std::vector<std::pair<size_t, size_t>> input_block_norm; // (model index, orientation-binning output index) per block-norm kernel

    // Model 0 always gets its own orientation-binning and block-normalization pass
    input_orient_bin.push_back(0);
    input_hog_detect.push_back(0);
    input_block_norm.emplace_back(0, 0);

    for(size_t i = 1; i < num_models; ++i)
    {
        size_t cur_num_bins     = multi_hog->model(i)->info()->num_bins();
        Size2D cur_cell_size    = multi_hog->model(i)->info()->cell_size();
        Size2D cur_block_size   = multi_hog->model(i)->info()->block_size();
        Size2D cur_block_stride = multi_hog->model(i)->info()->block_stride();

        if((cur_num_bins != prev_num_bins) || (cur_cell_size.width != prev_cell_size.width) || (cur_cell_size.height != prev_cell_size.height))
        {
            prev_num_bins     = cur_num_bins;
            prev_cell_size    = cur_cell_size;
            prev_block_size   = cur_block_size;
            prev_block_stride = cur_block_stride;

            // Compute orientation binning and block normalization kernels. Update input to process
            input_orient_bin.push_back(i);
            input_block_norm.emplace_back(i, input_orient_bin.size() - 1);
        }
        else if((cur_block_size.width != prev_block_size.width) || (cur_block_size.height != prev_block_size.height) || (cur_block_stride.width != prev_block_stride.width)
                || (cur_block_stride.height != prev_block_stride.height))
        {
            prev_block_size   = cur_block_size;
            prev_block_stride = cur_block_stride;

            // Compute block normalization kernel. Update input to process
            // (reuses the most recent orientation-binning output)
            input_block_norm.emplace_back(i, input_orient_bin.size() - 1);
        }

        // Update input to process for hog detector kernel
        // (every model gets a detector, fed by the most recent block-norm output)
        input_hog_detect.push_back(input_block_norm.size() - 1);
    }

    _detection_windows      = detection_windows;
    _non_maxima_suppression = non_maxima_suppression;
    _num_orient_bin_kernel  = input_orient_bin.size(); // Number of CLHOGOrientationBinningKernel kernels to compute
    _num_block_norm_kernel  = input_block_norm.size(); // Number of CLHOGBlockNormalizationKernel kernels to compute
    _num_hog_detect_kernel  = input_hog_detect.size(); // Number of CLHOGDetector functions to compute

    _orient_bin_kernel.resize(_num_orient_bin_kernel);
    _block_norm_kernel.resize(_num_block_norm_kernel);
    _hog_detect_kernel.resize(_num_hog_detect_kernel);
    _hog_space.resize(_num_orient_bin_kernel);
    _hog_norm_space.resize(_num_block_norm_kernel);

    // Allocate tensors for magnitude and phase
    TensorInfo info_mag(shape_img, Format::S16);
    _mag.allocator()->init(info_mag);

    TensorInfo info_phase(shape_img, Format::U8);
    _phase.allocator()->init(info_phase);

    // Manage intermediate buffers
    // NOTE: manage() must precede the kernels that consume these tensors, and
    // allocate() (further below) must come after the last configure() that uses them.
    _memory_group.manage(&_mag);
    _memory_group.manage(&_phase);

    // Initialise gradient kernel
    _gradient_kernel.configure(compile_context, input, &_mag, &_phase, phase_type, border_mode, constant_border_value);

    // Configure NETensor for the HOG space and orientation binning kernel
    for(size_t i = 0; i < _num_orient_bin_kernel; ++i)
    {
        const size_t idx_multi_hog = input_orient_bin[i];

        // Get the corresponding cell size and number of bins
        const Size2D &cell     = multi_hog->model(idx_multi_hog)->info()->cell_size();
        const size_t  num_bins = multi_hog->model(idx_multi_hog)->info()->num_bins();

        // Calculate number of cells along the x and y directions for the hog_space
        const size_t num_cells_x = width / cell.width;
        const size_t num_cells_y = height / cell.height;

        // TensorShape of hog space
        TensorShape shape_hog_space = input->info()->tensor_shape();
        shape_hog_space.set(Window::DimX, num_cells_x);
        shape_hog_space.set(Window::DimY, num_cells_y);

        // Allocate HOG space
        TensorInfo info_space(shape_hog_space, num_bins, DataType::F32);
        _hog_space[i].allocator()->init(info_space);

        // Manage intermediate buffers
        _memory_group.manage(&_hog_space[i]);

        // Initialise orientation binning kernel
        _orient_bin_kernel[i].configure(compile_context, &_mag, &_phase, &_hog_space[i], multi_hog->model(idx_multi_hog)->info());
    }

    // Allocate intermediate tensors
    // (mag/phase are only consumed by the orientation-binning kernels configured above)
    _mag.allocator()->allocate();
    _phase.allocator()->allocate();

    // Configure CLTensor for the normalized HOG space and block normalization kernel
    for(size_t i = 0; i < _num_block_norm_kernel; ++i)
    {
        const size_t idx_multi_hog  = input_block_norm[i].first;
        const size_t idx_orient_bin = input_block_norm[i].second;

        // Allocate normalized HOG space
        TensorInfo tensor_info(*(multi_hog->model(idx_multi_hog)->info()), width, height);
        _hog_norm_space[i].allocator()->init(tensor_info);

        // Manage intermediate buffers
        _memory_group.manage(&_hog_norm_space[i]);

        // Initialize block normalization kernel
        _block_norm_kernel[i].configure(compile_context, &_hog_space[idx_orient_bin], &_hog_norm_space[i], multi_hog->model(idx_multi_hog)->info());
    }

    // Allocate intermediate tensors
    // (HOG spaces are only consumed by the block-normalization kernels configured above)
    for(size_t i = 0; i < _num_orient_bin_kernel; ++i)
    {
        _hog_space[i].allocator()->allocate();
    }

    // Map the strides array so the host can read at(i) while configuring the detectors
    detection_window_strides->map(CLScheduler::get().queue(), true);

    // Configure HOG detector kernel
    for(size_t i = 0; i < _num_hog_detect_kernel; ++i)
    {
        const size_t idx_block_norm = input_hog_detect[i];

        // The final "i" is the index used to tag detections from this model
        _hog_detect_kernel[i].configure(compile_context, &_hog_norm_space[idx_block_norm], multi_hog->cl_model(i), detection_windows, detection_window_strides->at(i), threshold, i);
    }

    detection_window_strides->unmap(CLScheduler::get().queue());

    // Configure non maxima suppression kernel
    // (CPU kernel — configured unconditionally, but only scheduled in run() when enabled)
    _non_maxima_kernel.configure(_detection_windows, min_distance);

    // Allocate intermediate tensors
    for(size_t i = 0; i < _num_block_norm_kernel; ++i)
    {
        _hog_norm_space[i].allocator()->allocate();
    }
}
235
236void CLHOGMultiDetection::run()
237{
238 ARM_COMPUTE_ERROR_ON_MSG(_detection_windows == nullptr, "Unconfigured function");
239
Georgios Pinitasda953f22019-04-02 17:27:03 +0100240 MemoryGroupResourceScope scope_mg(_memory_group);
Georgios Pinitas8a94e7c2017-09-15 19:06:47 +0100241
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100242 // Reset detection window
243 _detection_windows->clear();
244
245 // Run gradient
246 _gradient_kernel.run();
247
248 // Run orientation binning kernel
249 for(size_t i = 0; i < _num_orient_bin_kernel; ++i)
250 {
Michalis Spyroubcfd09a2019-05-01 13:03:59 +0100251 CLScheduler::get().enqueue(_orient_bin_kernel[i], false);
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100252 }
253
254 // Run block normalization kernel
255 for(size_t i = 0; i < _num_block_norm_kernel; ++i)
256 {
Michalis Spyroubcfd09a2019-05-01 13:03:59 +0100257 CLScheduler::get().enqueue(_block_norm_kernel[i], false);
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100258 }
259
260 // Run HOG detector kernel
261 for(size_t i = 0; i < _num_hog_detect_kernel; ++i)
262 {
263 _hog_detect_kernel[i].run();
264 }
265
266 // Run non-maxima suppression kernel if enabled
267 if(_non_maxima_suppression)
268 {
269 // Map detection windows array before computing non maxima suppression
270 _detection_windows->map(CLScheduler::get().queue(), true);
Michalis Spyroubcfd09a2019-05-01 13:03:59 +0100271 Scheduler::get().schedule(&_non_maxima_kernel, Window::DimY);
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100272 _detection_windows->unmap(CLScheduler::get().queue());
273 }
Moritz Pflanzerf4af76e2017-09-06 07:42:43 +0100274}