blob: a3a62d6d5efe1d35aeb951e11ea81fa9ef49975c [file] [log] [blame]
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001/*
Michele Di Giorgiod9eaf612020-07-08 11:12:57 +01002 * Copyright (c) 2016-2020 Arm Limited.
Anthony Barbier6ff3b192017-09-04 18:44:23 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#include "arm_compute/runtime/CL/functions/CLFastCorners.h"
25
26#include "arm_compute/core/CL/OpenCL.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010027#include "arm_compute/core/Error.h"
28#include "arm_compute/core/TensorInfo.h"
29#include "arm_compute/core/Validate.h"
30#include "arm_compute/runtime/CL/CLScheduler.h"
31#include "arm_compute/runtime/ITensorAllocator.h"
Sang-Hoon Parkbef7fa22020-10-21 15:58:54 +010032#include "src/core/CL/kernels/CLFastCornersKernel.h"
33#include "src/core/CL/kernels/CLFillBorderKernel.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010034
35#include <algorithm>
36#include <cstring>
37
38using namespace arm_compute;
39
// Construct the function with an optional memory manager used to recycle the
// intermediate images across function lifetimes.
CLFastCorners::CLFastCorners(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)),
      _fast_corners_kernel(std::make_unique<CLFastCornersKernel>()), // corner-response kernel
      _suppr_func(),                                                 // non-maxima suppression function (used only when enabled)
      _copy_array_kernel(std::make_unique<CLCopyToArrayKernel>()),   // copies detected corners into the user array
      _output(),                   // intermediate corner-response image
      _suppr(),                    // intermediate suppressed image (non-max path only)
      _win(),
      _non_max(false),             // set by configure() from nonmax_suppression
      _num_corners(nullptr),       // optional user-owned counter, may stay null
      _num_buffer(),               // device buffer receiving the corner count
      _corners(nullptr),           // user-owned output keypoint array
      _constant_border_value(0)
{
}
55
// Defaulted out-of-line so the unique_ptr members can destroy kernel types
// that are only forward-declared in the public header.
CLFastCorners::~CLFastCorners() = default;
57
// Legacy overload: forwards to the CLCompileContext variant using the default
// compile context obtained from the global CL kernel library.
void CLFastCorners::configure(const ICLImage *input, float threshold, bool nonmax_suppression, ICLKeyPointArray *corners,
                              unsigned int *num_corners, BorderMode border_mode, uint8_t constant_border_value)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, threshold, nonmax_suppression, corners, num_corners, border_mode, constant_border_value);
}
63
64void CLFastCorners::configure(const CLCompileContext &compile_context, const ICLImage *input, float threshold, bool nonmax_suppression, ICLKeyPointArray *corners,
65 unsigned int *num_corners, BorderMode border_mode, uint8_t constant_border_value)
66{
Anthony Barbier6ff3b192017-09-04 18:44:23 +010067 ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(input);
68 ARM_COMPUTE_ERROR_ON(BorderMode::UNDEFINED != border_mode);
69 ARM_COMPUTE_ERROR_ON(nullptr == corners);
70 ARM_COMPUTE_ERROR_ON(threshold < 1 && threshold > 255);
71
72 TensorInfo tensor_info(input->info()->tensor_shape(), 1, DataType::U8);
73 _output.allocator()->init(tensor_info);
74
75 _non_max = nonmax_suppression;
76 _num_corners = num_corners;
77 _corners = corners;
78 _num_buffer = cl::Buffer(CLScheduler::get().context(), CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, sizeof(unsigned int));
79 _constant_border_value = constant_border_value;
80
81 const bool update_number = (nullptr != _num_corners);
82
Georgios Pinitas5701e2a2017-09-18 17:43:33 +010083 _memory_group.manage(&_output);
Sang-Hoon Parkbef7fa22020-10-21 15:58:54 +010084 _fast_corners_kernel->configure(compile_context, input, &_output, threshold, nonmax_suppression, border_mode);
Anthony Barbier6ff3b192017-09-04 18:44:23 +010085
86 if(!_non_max)
87 {
Sang-Hoon Parkbef7fa22020-10-21 15:58:54 +010088 _copy_array_kernel->configure(compile_context, &_output, update_number, _corners, &_num_buffer);
Anthony Barbier6ff3b192017-09-04 18:44:23 +010089 }
90 else
91 {
92 _suppr.allocator()->init(tensor_info);
Georgios Pinitas5701e2a2017-09-18 17:43:33 +010093 _memory_group.manage(&_suppr);
Anthony Barbier6ff3b192017-09-04 18:44:23 +010094
Manuel Bottini2b84be52020-04-08 10:15:51 +010095 _suppr_func.configure(compile_context, &_output, &_suppr, border_mode);
Sang-Hoon Parkbef7fa22020-10-21 15:58:54 +010096 _copy_array_kernel->configure(compile_context, &_suppr, update_number, _corners, &_num_buffer);
Anthony Barbier6ff3b192017-09-04 18:44:23 +010097
98 _suppr.allocator()->allocate();
99 }
100
101 // Allocate intermediate tensors
102 _output.allocator()->allocate();
103}
104
105void CLFastCorners::run()
106{
107 cl::CommandQueue q = CLScheduler::get().queue();
108
Georgios Pinitasda953f22019-04-02 17:27:03 +0100109 MemoryGroupResourceScope scope_mg(_memory_group);
Georgios Pinitas5701e2a2017-09-18 17:43:33 +0100110
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100111 if(_non_max)
112 {
113 ARM_COMPUTE_ERROR_ON_MSG(_output.cl_buffer().get() == nullptr, "Unconfigured function");
114 const auto out_buffer = static_cast<unsigned char *>(q.enqueueMapBuffer(_output.cl_buffer(), CL_TRUE, CL_MAP_WRITE, 0, _output.info()->total_size()));
115 memset(out_buffer, 0, _output.info()->total_size());
116 q.enqueueUnmapMemObject(_output.cl_buffer(), out_buffer);
117 }
118
Sang-Hoon Parkbef7fa22020-10-21 15:58:54 +0100119 CLScheduler::get().enqueue(*_fast_corners_kernel, false);
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100120
121 if(_non_max)
122 {
123 _suppr_func.run();
124 }
125
Sang-Hoon Parkbef7fa22020-10-21 15:58:54 +0100126 CLScheduler::get().enqueue(*_copy_array_kernel, false);
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100127
128 unsigned int get_num_corners = 0;
129 q.enqueueReadBuffer(_num_buffer, CL_TRUE, 0, sizeof(unsigned int), &get_num_corners);
130
131 size_t corner_size = std::min(static_cast<size_t>(get_num_corners), _corners->max_num_values());
132
133 _corners->resize(corner_size);
134
135 if(_num_corners != nullptr)
136 {
137 *_num_corners = get_num_corners;
138 }
139
140 q.flush();
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100141}