/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLCannyEdge.h"

#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/functions/CLSobel3x3.h"
#include "arm_compute/runtime/CL/functions/CLSobel5x5.h"
#include "arm_compute/runtime/CL/functions/CLSobel7x7.h"
#include "support/ToolchainSupport.h"

using namespace arm_compute;

CLCannyEdge::CLCannyEdge(std::shared_ptr<IMemoryManager> memory_manager) // NOLINT
    : _memory_group(std::move(memory_manager)),
      _sobel(),
      _gradient(),
      _border_mag_gradient(),
      _non_max_suppr(),
      _edge_trace(),
      _gx(),
      _gy(),
      _mag(),
      _phase(),
      _nonmax(),
      _visited(),
      _recorded(),
      _l1_list_counter(),
      _l1_stack(),
      _output(nullptr)
{
}

void CLCannyEdge::configure(ICLTensor *input, ICLTensor *output, int32_t upper_thr, int32_t lower_thr, int32_t gradient_size, int32_t norm_type, BorderMode border_mode,
                            uint8_t constant_border_value)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
    ARM_COMPUTE_ERROR_ON((1 != norm_type) && (2 != norm_type));
    ARM_COMPUTE_ERROR_ON((gradient_size != 3) && (gradient_size != 5) && (gradient_size != 7));
    ARM_COMPUTE_ERROR_ON((lower_thr < 0) || (lower_thr >= upper_thr));

    _output = output;

    const unsigned int L1_hysteresis_stack_size = 8;
    const TensorShape  shape                    = input->info()->tensor_shape();

    TensorInfo gradient_info;
    TensorInfo info;

    // Initialize intermediate image infos: gradients from Sobel 3x3/5x5 fit in 16-bit
    // tensors, while Sobel 7x7 needs 32-bit storage for the gradient and magnitude images.
    if(gradient_size < 7)
    {
        gradient_info.init(shape, 1, arm_compute::DataType::S16);
        info.init(shape, 1, arm_compute::DataType::U16);
    }
    else
    {
        gradient_info.init(shape, 1, arm_compute::DataType::S32);
        info.init(shape, 1, arm_compute::DataType::U32);
    }

    _gx.allocator()->init(gradient_info);
    _gy.allocator()->init(gradient_info);
    _mag.allocator()->init(info);
    _nonmax.allocator()->init(info);

    TensorInfo info_u8(shape, 1, arm_compute::DataType::U8);
    _phase.allocator()->init(info_u8);
    _l1_list_counter.allocator()->init(info_u8);

    TensorInfo info_u32(shape, 1, arm_compute::DataType::U32);
    _visited.allocator()->init(info_u32);
    _recorded.allocator()->init(info_u32);

    TensorShape shape_l1_stack = input->info()->tensor_shape();
    shape_l1_stack.set(0, input->info()->dimension(0) * L1_hysteresis_stack_size);
    TensorInfo info_s32(shape_l1_stack, 1, arm_compute::DataType::S32);
    _l1_stack.allocator()->init(info_s32);

    // Manage intermediate buffers
    _memory_group.manage(&_gx);
    _memory_group.manage(&_gy);

    // Configure/Init sobelNxN
    if(gradient_size == 3)
    {
        auto k = arm_compute::support::cpp14::make_unique<CLSobel3x3>();
        k->configure(input, &_gx, &_gy, border_mode, constant_border_value);
        _sobel = std::move(k);
    }
    else if(gradient_size == 5)
    {
        auto k = arm_compute::support::cpp14::make_unique<CLSobel5x5>();
        k->configure(input, &_gx, &_gy, border_mode, constant_border_value);
        _sobel = std::move(k);
    }
    else if(gradient_size == 7)
    {
        auto k = arm_compute::support::cpp14::make_unique<CLSobel7x7>();
        k->configure(input, &_gx, &_gy, border_mode, constant_border_value);
        _sobel = std::move(k);
    }
    else
    {
        ARM_COMPUTE_ERROR("Gradient size %d not supported", gradient_size);
    }

    // Manage intermediate buffers
    _memory_group.manage(&_mag);
    _memory_group.manage(&_phase);

    // Configure gradient
    _gradient.configure(&_gx, &_gy, &_mag, &_phase, norm_type);

    // Allocate intermediate buffers
    _gx.allocator()->allocate();
    _gy.allocator()->allocate();

    // Manage intermediate buffers
    _memory_group.manage(&_nonmax);

    // Configure non-maxima suppression
    _non_max_suppr.configure(&_mag, &_phase, &_nonmax, lower_thr, border_mode == BorderMode::UNDEFINED);

    // Allocate intermediate buffers
    _phase.allocator()->allocate();

    // Fill border around magnitude image as non-maxima suppression will access
    // it. If border mode is undefined filling the border is a nop.
    _border_mag_gradient.configure(&_mag, _non_max_suppr.border_size(), border_mode, constant_border_value);

    // Allocate intermediate buffers
    _mag.allocator()->allocate();

    // Manage intermediate buffers
    _memory_group.manage(&_visited);
    _memory_group.manage(&_recorded);
    _memory_group.manage(&_l1_stack);
    _memory_group.manage(&_l1_list_counter);

    // Configure edge tracing
    _edge_trace.configure(&_nonmax, output, upper_thr, lower_thr, &_visited, &_recorded, &_l1_stack, &_l1_list_counter);

    // Allocate intermediate buffers
    _visited.allocator()->allocate();
    _recorded.allocator()->allocate();
    _l1_stack.allocator()->allocate();
    _l1_list_counter.allocator()->allocate();
    _nonmax.allocator()->allocate();
}

void CLCannyEdge::run()
{
    MemoryGroupResourceScope scope_mg(_memory_group);

    // Run sobel
    _sobel->run();

    // Run phase and magnitude calculation
    CLScheduler::get().enqueue(_gradient, false);

    // Fill border before non-maxima suppression. Nop for border mode undefined.
    CLScheduler::get().enqueue(_border_mag_gradient, false);

    // Run non-maxima suppression
    _nonmax.clear(CLScheduler::get().queue());
    CLScheduler::get().enqueue(_non_max_suppr, false);

    // Clear temporary structures and run edge trace
    _output->clear(CLScheduler::get().queue());
    _visited.clear(CLScheduler::get().queue());
    _recorded.clear(CLScheduler::get().queue());
    _l1_list_counter.clear(CLScheduler::get().queue());
    _l1_stack.clear(CLScheduler::get().queue());
    CLScheduler::get().enqueue(_edge_trace, true);
}
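
// Usage sketch (illustrative comment only, not part of the library build): how a caller
// might wire up CLCannyEdge. The image size, thresholds and border mode below are
// assumptions chosen for the example; a real application also needs a valid CL context,
// e.g. via CLScheduler::get().default_init(), before configuring any CL function.
//
//   CLImage src;
//   CLImage dst;
//   src.allocator()->init(TensorInfo(640U, 480U, Format::U8));
//   dst.allocator()->init(TensorInfo(640U, 480U, Format::U8));
//
//   CLCannyEdge canny;
//   canny.configure(&src, &dst,
//                   150 /* upper_thr */, 100 /* lower_thr */,
//                   3 /* gradient_size */, 1 /* norm_type: L1 */,
//                   BorderMode::REPLICATE, 0 /* constant_border_value */);
//
//   src.allocator()->allocate();
//   dst.allocator()->allocate();
//   // ... map src and fill it with the U8 input image ...
//   canny.run();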