/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/CL/kernels/CLGaussianPyramidKernel.h"

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"

using namespace arm_compute;

CLGaussianPyramidHorKernel::CLGaussianPyramidHorKernel()
    : _l2_load_offset(0)
{
}

BorderSize CLGaussianPyramidHorKernel::border_size() const
{
    return BorderSize(0, 2);
}

void CLGaussianPyramidHorKernel::configure(const ICLTensor *input, ICLTensor *output)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U16);
    ARM_COMPUTE_ERROR_ON(input->info()->dimension(1) != output->info()->dimension(1));

    for(size_t i = 2; i < Coordinates::num_max_dimensions; ++i)
    {
        ARM_COMPUTE_ERROR_ON(input->info()->dimension(i) != output->info()->dimension(i));
    }

    _input  = input;
    _output = output;

    // Create kernel
    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gaussian1x5_sub_x"));

    // Configure kernel window
    constexpr unsigned int num_elems_processed_per_iteration = 16;
    constexpr unsigned int num_elems_read_per_iteration      = 20;
    constexpr unsigned int num_elems_written_per_iteration   = 8;
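    // Per iteration, 16 input pixels are processed; the 5-tap horizontal filter
    // needs 2 extra pixels on each side (hence 20 read), and sub-sampling by 2
    // leaves 8 output pixels to write.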
    const float scale_x = static_cast<float>(output->info()->dimension(0)) / input->info()->dimension(0);

    Window                 win = calculate_max_window_horizontal(*input->info(), Steps(num_elems_processed_per_iteration));
    AccessWindowHorizontal output_access(output->info(), 0, num_elems_written_per_iteration, scale_x);

    // Sub sampling selects odd pixels (1, 3, 5, ...) for images with even
    // width and even pixels (0, 2, 4, ...) for images with odd width. (Whether
    // a pixel is even or odd is determined based on the tensor shape not the
    // valid region!)
    // Thus the offset from which the first pixel (L2) for the convolution is
    // loaded depends on the anchor and shape of the valid region.
    // In the case of an even shape (= even image width) we need to load L2
    // from -2 if the anchor is odd and from -1 if the anchor is even. That
    // makes sure that L2 is always loaded from an odd pixel.
    // On the other hand, for an odd shape (= odd image width) we need to load
    // L2 from -1 if the anchor is odd and from -2 if the anchor is even to
    // achieve the opposite effect.
    // The condition can be simplified to checking whether anchor + shape is
    // odd (-2) or even (-1) as only adding an odd and an even number will have
    // an odd result.
    _l2_load_offset = -border_size().left;

    if((_input->info()->valid_region().anchor[0] + _input->info()->valid_region().shape[0]) % 2 == 0)
    {
        _l2_load_offset += 1;
    }
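    // Example: with anchor x = 0 and shape 640 (anchor + shape even), the offset
    // becomes -2 + 1 = -1, so L2 for the first position is read from the odd
    // pixel at x = -1, as described above.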
    update_window_and_padding(win,
                              AccessWindowHorizontal(input->info(), _l2_load_offset, num_elems_read_per_iteration),
                              output_access);

    output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape()));

    ICLKernel::configure_internal(win);
}

void CLGaussianPyramidHorKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

    Window win_in(window);
    win_in.shift(Window::DimX, _l2_load_offset);

    // The output is half the width of the input:
    Window win_out(window);
    win_out.scale(Window::DimX, 0.5f);

    Window slice_in  = win_in.first_slice_window_2D();
    Window slice_out = win_out.first_slice_window_2D();

    do
    {
        unsigned int idx = 0;
        add_2D_tensor_argument(idx, _input, slice_in);
        add_2D_tensor_argument(idx, _output, slice_out);
        enqueue(queue, *this, slice_out);
    }
    while(win_in.slide_window_slice_2D(slice_in) && win_out.slide_window_slice_2D(slice_out));
}

CLGaussianPyramidVertKernel::CLGaussianPyramidVertKernel()
    : _t2_load_offset(0)
{
}

BorderSize CLGaussianPyramidVertKernel::border_size() const
{
    return BorderSize(2, 0);
}

void CLGaussianPyramidVertKernel::configure(const ICLTensor *input, ICLTensor *output)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U16);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
    ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != output->info()->dimension(0));

    for(size_t i = 2; i < Coordinates::num_max_dimensions; ++i)
    {
        ARM_COMPUTE_ERROR_ON(input->info()->dimension(i) != output->info()->dimension(i));
    }

    _input  = input;
    _output = output;

    // Create kernel
    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gaussian5x1_sub_y"));

    // Configure kernel window
    constexpr unsigned int num_elems_processed_per_iteration = 8;
    constexpr unsigned int num_rows_processed_per_iteration  = 2;
    constexpr unsigned int num_elems_written_per_iteration   = 8;
    constexpr unsigned int num_elems_read_per_iteration      = 8;
    constexpr unsigned int num_rows_per_iteration            = 5;
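    // Each iteration reads 8 pixels from 5 consecutive rows (the 5x1 filter taps)
    // and advances by 2 input rows, which corresponds to a single row of the
    // sub-sampled output.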
    const float scale_y = static_cast<float>(output->info()->dimension(1)) / input->info()->dimension(1);

    Window                win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration, num_rows_processed_per_iteration));
    AccessWindowRectangle output_access(output->info(), 0, 0, num_elems_written_per_iteration, num_rows_per_iteration, 1.f, scale_y);

    // Determine whether we need to load even or odd rows. See the detailed
    // explanation in CLGaussianPyramidHorKernel::configure() above.
    _t2_load_offset = -border_size().top;

    if((_input->info()->valid_region().anchor[1] + _input->info()->valid_region().shape[1]) % 2 == 0)
    {
        _t2_load_offset += 1;
    }
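    // Example: with anchor y = 0 and shape 480 (anchor + shape even), the offset
    // becomes -2 + 1 = -1, so T2 for the first position is read from the odd
    // row at y = -1.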
    update_window_and_padding(win,
                              AccessWindowRectangle(input->info(), 0, _t2_load_offset, num_elems_read_per_iteration, num_rows_per_iteration),
                              output_access);

    output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape()));

    ICLKernel::configure_internal(win);
}

void CLGaussianPyramidVertKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
    ARM_COMPUTE_ERROR_ON(window.x().step() != 8);
    ARM_COMPUTE_ERROR_ON(window.y().step() % 2);

    Window win_in(window);
    win_in.shift(Window::DimY, _t2_load_offset);

    Window win_out(window);
    win_out.scale(Window::DimY, 0.5f);

    Window slice_in  = win_in.first_slice_window_2D();
    Window slice_out = win_out.first_slice_window_2D();

    do
    {
        unsigned int idx = 0;
        add_2D_tensor_argument(idx, _input, slice_in);
        add_2D_tensor_argument(idx, _output, slice_out);
        enqueue(queue, *this, slice_out);
    }
    while(win_in.slide_window_slice_2D(slice_in) && win_out.slide_window_slice_2D(slice_out));
}