/*
 * Copyright (c) 2016, 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/NEON/kernels/NEGaussianPyramidKernel.h"

#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/NEON/INEKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"

#include <arm_neon.h>
#include <cstddef>
#include <cstdint>
#include <tuple>

using namespace arm_compute;

NEGaussianPyramidHorKernel::NEGaussianPyramidHorKernel()
    : _l2_load_offset(0)
{
}

BorderSize NEGaussianPyramidHorKernel::border_size() const
{
    return BorderSize(0, 2);
}

void NEGaussianPyramidHorKernel::configure(const ITensor *input, ITensor *output)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S16);
    ARM_COMPUTE_ERROR_ON(input->info()->dimension(1) != output->info()->dimension(1));

    for(size_t i = 2; i < Coordinates::num_max_dimensions; ++i)
    {
        ARM_COMPUTE_ERROR_ON(input->info()->dimension(i) != output->info()->dimension(i));
    }

    _input  = input;
    _output = output;

    // Configure kernel window
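    // Per window step of 16 input pixels, the run() loop below issues a single
    // 32-byte vld2q_u8 load (anchored at the L2 load offset) and stores 8 S16
    // output pixels, i.e. the output is half the width of the input.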
    constexpr unsigned int num_elems_processed_per_iteration = 16;
    constexpr unsigned int num_elems_read_per_iteration      = 32;
    constexpr unsigned int num_elems_written_per_iteration   = 8;
    const float            scale_x                           = static_cast<float>(output->info()->dimension(0)) / input->info()->dimension(0);

    Window                 win = calculate_max_window_horizontal(*input->info(), Steps(num_elems_processed_per_iteration));
    AccessWindowHorizontal output_access(output->info(), 0, num_elems_written_per_iteration, scale_x);

    // Sub sampling selects odd pixels (1, 3, 5, ...) for images with even
    // width and even pixels (0, 2, 4, ...) for images with odd width. (Whether
    // a pixel is even or odd is determined based on the tensor shape not the
    // valid region!)
    // Thus the offset from which the first pixel (L2) for the convolution is
    // loaded depends on the anchor and shape of the valid region.
    // In the case of an even shape (= even image width) we need to load L2
    // from -2 if the anchor is odd and from -1 if the anchor is even. That
    // makes sure that L2 is always loaded from an odd pixel.
    // On the other hand, for an odd shape (= odd image width) we need to load
    // L2 from -1 if the anchor is odd and from -2 if the anchor is even to
    // achieve the opposite effect.
    // The condition can be simplified to checking whether anchor + shape is
    // odd (-2) or even (-1) as only adding an odd and an even number will have
    // an odd result.
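    // For example, with a valid region anchored at 0: a width of 640 (even sum)
    // gives an offset of -1, while a width of 639 (odd sum) gives an offset of -2.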
    _l2_load_offset = -border_size().left;

    if((_input->info()->valid_region().anchor[0] + _input->info()->valid_region().shape[0]) % 2 == 0)
    {
        _l2_load_offset += 1;
    }

    // Replace input access with static window
    update_window_and_padding(win,
                              AccessWindowHorizontal(input->info(), _l2_load_offset, num_elems_read_per_iteration),
                              output_access);

    output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape()));

    INEKernel::configure(win);
}

void NEGaussianPyramidHorKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    ARM_COMPUTE_ERROR_ON(window.x().step() % 2);
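    // 6 and 4 are the centre weights of the 5-tap binomial filter {1, 4, 6, 4, 1};
    // the outer weights of 1 are applied by the plain vaddq_s16 below.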
    static const int16x8_t six  = vdupq_n_s16(6);
    static const int16x8_t four = vdupq_n_s16(4);

    Window win_in(window);
    win_in.shift(Window::DimX, _l2_load_offset);

    Iterator in(_input, win_in);

    // The output is half the width of the input
    Window win_out(window);
    win_out.scale(Window::DimX, 0.5f);

    Iterator out(_output, win_out);
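    // Each iteration deinterleaves 32 input bytes into even/odd lanes and applies
    // the 5-tap filter at every other pixel. With p pointing at the shifted load
    // position, each of the 8 outputs is effectively:
    //   out[i] = p[2*i] + 4*p[2*i + 1] + 6*p[2*i + 2] + 4*p[2*i + 3] + p[2*i + 4]
    // The result is kept as S16 without normalisation; the divide by 256 happens
    // in the vertical pass.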
    execute_window_loop(window, [&](const Coordinates & id)
    {
        const uint8x16x2_t data_2q   = vld2q_u8(in.ptr());
        const uint8x16_t &data_even = data_2q.val[0];
        const uint8x16_t &data_odd  = data_2q.val[1];

        const int16x8_t data_l2 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(data_even)));
        const int16x8_t data_l1 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(data_odd)));
        const int16x8_t data_m  = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(vextq_u8(data_even, data_even, 1))));
        const int16x8_t data_r1 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(vextq_u8(data_odd, data_odd, 1))));
        const int16x8_t data_r2 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(vextq_u8(data_even, data_even, 2))));

        int16x8_t out_val = vaddq_s16(data_l2, data_r2);
        out_val           = vmlaq_s16(out_val, data_l1, four);
        out_val           = vmlaq_s16(out_val, data_m, six);
        out_val           = vmlaq_s16(out_val, data_r1, four);

        vst1q_s16(reinterpret_cast<int16_t *>(out.ptr()), out_val);
    },
    in, out);
}

NEGaussianPyramidVertKernel::NEGaussianPyramidVertKernel()
    : _t2_load_offset(0)
{
}

BorderSize NEGaussianPyramidVertKernel::border_size() const
{
    return BorderSize(2, 0);
}

void NEGaussianPyramidVertKernel::configure(const ITensor *input, ITensor *output)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::S16);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
    ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != output->info()->dimension(0));

    for(size_t i = 2; i < Coordinates::num_max_dimensions; ++i)
    {
        ARM_COMPUTE_ERROR_ON(input->info()->dimension(i) != output->info()->dimension(i));
    }

    _input  = input;
    _output = output;

    // Configure kernel window
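    // Each iteration steps over 2 input rows and 16 pixels, reads a 5-row window
    // (the vertical 5-tap footprint) and writes a single 16-pixel output row,
    // i.e. the output is half the height of the input.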
    constexpr unsigned int num_elems_processed_per_iteration = 16;
    constexpr unsigned int num_rows_processed_per_iteration  = 2;

    constexpr unsigned int num_elems_written_per_iteration = 16;
    constexpr unsigned int num_rows_written_per_iteration  = 1;

    constexpr unsigned int num_elems_read_per_iteration = 16;
    constexpr unsigned int num_rows_read_per_iteration  = 5;

    const float scale_y = static_cast<float>(output->info()->dimension(1)) / input->info()->dimension(1);

    Window                win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration, num_rows_processed_per_iteration));
    AccessWindowRectangle output_access(output->info(), 0, 0, num_elems_written_per_iteration, num_rows_written_per_iteration, 1.f, scale_y);

    // Determine whether we need to load even or odd rows. See above for a
    // detailed explanation.
    _t2_load_offset = -border_size().top;

    if((_input->info()->valid_region().anchor[1] + _input->info()->valid_region().shape[1]) % 2 == 0)
    {
        _t2_load_offset += 1;
    }

    update_window_and_padding(win,
                              AccessWindowRectangle(input->info(), 0, _t2_load_offset, num_elems_read_per_iteration, num_rows_read_per_iteration),
                              output_access);

    output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape()));

    INEKernel::configure(win);
}

void NEGaussianPyramidVertKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    ARM_COMPUTE_ERROR_ON(window.x().step() != 16);
    ARM_COMPUTE_ERROR_ON(window.y().step() % 2);
    ARM_COMPUTE_ERROR_ON(_input->buffer() == nullptr);
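    // Same 5-tap weights as the horizontal pass: 6 and 4 are the centre weights
    // of {1, 4, 6, 4, 1}; the outer weights of 1 are applied by vaddq_u16 below.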
    static const uint16x8_t six  = vdupq_n_u16(6);
    static const uint16x8_t four = vdupq_n_u16(4);

    Window win_in(window);
    // Need to load two times 8 values instead of 16 values once
    win_in.set_dimension_step(Window::DimX, 8);
    win_in.shift(Window::DimY, _t2_load_offset);

    Iterator in(_input, win_in);

    // Output's height is half of input's
    Window win_out(window);
    win_out.scale(Window::DimY, 0.5f);

    Iterator out(_output, win_out);
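    // Base pointers for the five rows of the vertical filter footprint (top2, top,
    // mid, low, low2). The iterator offset added below already includes the shift
    // by _t2_load_offset, so these select the correct even/odd starting row.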
    const uint8_t *input_top2_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(0, 0));
    const uint8_t *input_top_ptr  = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(0, 1));
    const uint8_t *input_mid_ptr  = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(0, 2));
    const uint8_t *input_low_ptr  = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(0, 3));
    const uint8_t *input_low2_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(0, 4));
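    // Each iteration filters 16 S16 values as two batches of 8. vqshrn_n_u16(x, 8)
    // narrows with a saturating right shift by 8, i.e. divides by 256 = 16 * 16,
    // the combined weight of the separable 5x5 kernel, producing 16 U8 output pixels.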
    execute_window_loop(window, [&](const Coordinates & id)
    {
        // Low data
        const uint16x8_t data_low_t2 = vreinterpretq_u16_s16(vld1q_s16(reinterpret_cast<const int16_t *>(input_top2_ptr + in.offset())));
        const uint16x8_t data_low_t1 = vreinterpretq_u16_s16(vld1q_s16(reinterpret_cast<const int16_t *>(input_top_ptr + in.offset())));
        const uint16x8_t data_low_m  = vreinterpretq_u16_s16(vld1q_s16(reinterpret_cast<const int16_t *>(input_mid_ptr + in.offset())));
        const uint16x8_t data_low_b1 = vreinterpretq_u16_s16(vld1q_s16(reinterpret_cast<const int16_t *>(input_low_ptr + in.offset())));
        const uint16x8_t data_low_b2 = vreinterpretq_u16_s16(vld1q_s16(reinterpret_cast<const int16_t *>(input_low2_ptr + in.offset())));

        uint16x8_t out_low = vaddq_u16(data_low_t2, data_low_b2);
        out_low            = vmlaq_u16(out_low, data_low_t1, four);
        out_low            = vmlaq_u16(out_low, data_low_m, six);
        out_low            = vmlaq_u16(out_low, data_low_b1, four);

        in.increment(Window::DimX);

        // High data
        const uint16x8_t data_high_t2 = vreinterpretq_u16_s16(vld1q_s16(reinterpret_cast<const int16_t *>(input_top2_ptr + in.offset())));
        const uint16x8_t data_high_t1 = vreinterpretq_u16_s16(vld1q_s16(reinterpret_cast<const int16_t *>(input_top_ptr + in.offset())));
        const uint16x8_t data_high_m  = vreinterpretq_u16_s16(vld1q_s16(reinterpret_cast<const int16_t *>(input_mid_ptr + in.offset())));
        const uint16x8_t data_high_b1 = vreinterpretq_u16_s16(vld1q_s16(reinterpret_cast<const int16_t *>(input_low_ptr + in.offset())));
        const uint16x8_t data_high_b2 = vreinterpretq_u16_s16(vld1q_s16(reinterpret_cast<const int16_t *>(input_low2_ptr + in.offset())));

        uint16x8_t out_high = vaddq_u16(data_high_t2, data_high_b2);
        out_high            = vmlaq_u16(out_high, data_high_t1, four);
        out_high            = vmlaq_u16(out_high, data_high_m, six);
        out_high            = vmlaq_u16(out_high, data_high_b1, four);

        vst1q_u8(out.ptr(), vcombine_u8(vqshrn_n_u16(out_low, 8), vqshrn_n_u16(out_high, 8)));
    },
    in, out);
}