/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h"
#include "arm_compute/core/NEON/kernels/detail/NEDirectConvolutionDetail.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/AccessWindowTranspose.h"
#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/NEON/INEKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "support/ToolchainSupport.h"

using namespace arm_compute;
using namespace arm_compute::detail;
using namespace arm_compute::misc::shape_calculator;
using namespace depthwise;

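// This kernel supports two execution paths:
//  - a generic NEON implementation (convolver_3x3 below) that handles F32 and QASYMM8 inputs
//    for strides 1, 2 and 3 with arbitrary padding, and
//  - an optimized path that delegates to the depthwise::DepthwiseConvolution engine for F32
//    inputs with stride 1 or 2 and SAME/VALID padding (see is_optimized_execution_possible()).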
namespace
{
template <typename T1, typename T2, unsigned int stridex>
class convolver_3x3
{
public:
    static void convolve(const Window &window, unsigned int num_elems_written_per_iteration,
                         const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info)
    {
        const int input_offset   = -input->info()->quantization_info().offset;
        const int weights_offset = -weights->info()->quantization_info().offset;

        const int          input_stride_x  = input->info()->strides_in_bytes().x();
        const int          input_stride_y  = input->info()->strides_in_bytes().y();
        const int          output_stride_y = output->info()->strides_in_bytes().y();
        const int          kernel_stride_y = weights->info()->strides_in_bytes().y();
        const int          kernel_stride_z = weights->info()->strides_in_bytes().z();
        const int          output_w        = output->info()->dimension(0);
        const int          output_h        = output->info()->dimension(1);
        const int          delta_input     = get_input_num_elems_processed<stridex>(num_elems_written_per_iteration);
        const unsigned int conv_stride_y   = std::get<1>(conv_info.stride());
        const unsigned int conv_pad_x      = conv_info.pad_left();
        const unsigned int conv_pad_y      = conv_info.pad_top();

        // Set up the output window for the iterator
        Window window_out = window;
        window_out.set(Window::DimX, Window::Dimension(0, output->info()->dimension(Window::DimX), output->info()->dimension(Window::DimX)));
        window_out.set(Window::DimY, Window::Dimension(0, output->info()->dimension(Window::DimY), output->info()->dimension(Window::DimY)));

        // Set up the input window for the iterator
        Window window_in = window;
        // We only want execute_window_loop to iterate over the dimensions beyond the first two, so set the X and Y dimensions to 0
        window_in.set(Window::DimX, Window::Dimension(0, 0, 0));
        window_in.set(Window::DimY, Window::Dimension(0, 0, 0));

        Window window_k = calculate_max_window(*weights->info(), Steps(1u));

        Iterator in(input, window_in);
        Iterator out(output, window_out);
        Iterator w(weights, window_k);

        const uint8_t *weights_ptr = w.ptr();

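        // The window loop below iterates over the remaining dimensions (channel and batch).
        // For each plane the three 3x3 weight rows are loaded once, with the quantization
        // offset folded in for QASYMM8, and reused for every output row of that plane.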
        execute_window_loop(window_out, [&](const Coordinates & id)
        {
            int ih = 0;
            int oh = 0;

            const uint8_t *input_ptr        = in.ptr() - conv_pad_x * input_stride_x - conv_pad_y * input_stride_y;
            const uint8_t *ptr_weights_base = weights_ptr + id.z() * kernel_stride_z;

            const auto ptr_weights_r0 = reinterpret_cast<const T1 *>(ptr_weights_base);
            const auto ptr_weights_r1 = reinterpret_cast<const T1 *>(ptr_weights_base + kernel_stride_y);
            const auto ptr_weights_r2 = reinterpret_cast<const T1 *>(ptr_weights_base + kernel_stride_y * 2);
            const auto vw_r0          = load_matrix_row(ptr_weights_r0, weights_offset);
            const auto vw_r1          = load_matrix_row(ptr_weights_r1, weights_offset);
            const auto vw_r2          = load_matrix_row(ptr_weights_r2, weights_offset);

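            // Walk the output plane: the row loop advances the input by conv_stride_y rows per
            // output row, while the column loop slides the three input row pointers by
            // delta_input elements and produces num_elems_written_per_iteration outputs per
            // step through the vectorised convolve_3x3<stridex>() helper.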
            for(ih = 0, oh = 0; oh < output_h; ++oh, ih += conv_stride_y)
            {
                auto in_top = reinterpret_cast<const T1 *>(input_ptr + (ih + 0) * input_stride_y);
                auto in_mid = reinterpret_cast<const T1 *>(input_ptr + (ih + 1) * input_stride_y);
                auto in_low = reinterpret_cast<const T1 *>(input_ptr + (ih + 2) * input_stride_y);
                auto p_out  = reinterpret_cast<T2 *>(out.ptr() + oh * output_stride_y);

                for(int ow = 0; ow < output_w; ow += num_elems_written_per_iteration,
                    in_top += delta_input, in_mid += delta_input, in_low += delta_input,
                    p_out += num_elems_written_per_iteration)
                {
                    auto vres = convolve_3x3<stridex>(in_top, in_mid, in_low, vw_r0, vw_r1, vw_r2, 0, input_offset);
                    store_results<stridex>(p_out, vres);
                }
            }
        },
        in, out);
    }
};

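// Dispatch on the horizontal stride so the convolver is instantiated with the stride as a
// compile-time constant, letting the vector helpers in NEDirectConvolutionDetail specialise
// their loads and stores per stride.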
template <typename T1, typename T2>
inline void convolve_3x3(const Window &window, unsigned int num_elems_written_per_iteration,
                         const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info)
{
    const unsigned int conv_stride_x = std::get<0>(conv_info.stride());
    switch(conv_stride_x)
    {
        case 1:
            convolver_3x3<T1, T2, 1>::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info);
            break;
        case 2:
            convolver_3x3<T1, T2, 2>::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info);
            break;
        case 3:
            convolver_3x3<T1, T2, 3>::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info);
            break;
        default:
            ARM_COMPUTE_ERROR("Not implemented");
    }
}
} // namespace

NEDepthwiseConvolutionLayer3x3Kernel::NEDepthwiseConvolutionLayer3x3Kernel()
    : _border_size(0), _input(), _output(), _weights(), _conv_info(), _convolver(nullptr), _num_elems_written_per_iteration(0), _run_optimized(false)
{
}

BorderSize NEDepthwiseConvolutionLayer3x3Kernel::border_size() const
{
    return _border_size;
}

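// A minimal usage sketch (illustrative only: "src", "weights" and "dst" are hypothetical,
// already-allocated tensors, and in practice the kernel is driven through the
// NEDepthwiseConvolutionLayer3x3 function rather than scheduled by hand):
//
//   NEDepthwiseConvolutionLayer3x3Kernel kernel;
//   kernel.configure(&src, &weights, &dst, PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW);
//   NEScheduler::get().schedule(&kernel, Window::DimX);
//
// When the generic path is selected, the caller is also expected to fill the input border
// (e.g. with NEFillBorderKernel) before running the kernel.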
void NEDepthwiseConvolutionLayer3x3Kernel::configure(const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info, DataLayout data_layout)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F32);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);

    _input     = input;
    _output    = output;
    _weights   = weights;
    _conv_info = conv_info;
    _convolver = nullptr;

    _run_optimized = NEDepthwiseConvolutionLayer3x3Kernel::is_optimized_execution_possible(input->info()->tensor_shape(),
                                                                                           conv_info,
                                                                                           input->info()->data_type(),
                                                                                           data_layout);

    (_run_optimized) ? configure_optimized() : configure_generic();
}

void NEDepthwiseConvolutionLayer3x3Kernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_UNUSED(info);

    (_run_optimized) ? run_optimized(window, info) : run_generic(window, info);
}

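// The optimized convolver path is only taken when all of the following hold: F32 data, equal
// strides of 1 or 2, and either SAME padding (as computed by calculate_same_pad for a 3x3
// kernel) or no padding at all. For example, a stride-1 3x3 convolution padded by 1 on every
// side matches the SAME case, so an F32 input takes the optimized path.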
bool NEDepthwiseConvolutionLayer3x3Kernel::is_optimized_execution_possible(TensorShape input_shape, PadStrideInfo conv_info, DataType dt, DataLayout data_layout)
{
    // Reshape input shape if in NHWC format
    TensorShape in_shape{ input_shape };
    if(data_layout == DataLayout::NHWC)
    {
        in_shape.set(Window::DimX, input_shape.y());
        in_shape.set(Window::DimY, input_shape.z());
        in_shape.set(Window::DimZ, input_shape.x());
    }

    // Check supported data type
    bool supported_datatype = (dt == DataType::F32);

    // Check for supported strides
    const auto &strides           = conv_info.stride();
    bool        supported_strides = (strides.first == strides.second) && ((strides.first == 1) || (strides.first == 2));

    // Check for supported padding
    const auto    pad_top    = conv_info.pad_top();
    const auto    pad_right  = conv_info.pad_right();
    const auto    pad_bottom = conv_info.pad_bottom();
    const auto    pad_left   = conv_info.pad_left();
    PadStrideInfo same_pad   = calculate_same_pad(in_shape, TensorShape(3U, 3U), conv_info);
    bool          is_same_padding   = (pad_top == same_pad.pad_top()) && (pad_right == same_pad.pad_right()) && (pad_bottom == same_pad.pad_bottom()) && (pad_left == same_pad.pad_left());
    bool          is_valid_padding  = (pad_top == 0) && (pad_right == 0) && (pad_bottom == 0) && (pad_left == 0);
    bool          supported_padding = is_same_padding || is_valid_padding;

    return supported_datatype && supported_strides && supported_padding;
}

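// Recreates the convolver using the tensors' current buffer pointers. This is presumably meant
// to be called once the tensors have been allocated, since the convolver built at configure()
// time may have been created before memory was assigned to the buffers.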
void NEDepthwiseConvolutionLayer3x3Kernel::generate_convolver()
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(_input, 1, DataType::F32);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(_input, _weights);
    ARM_COMPUTE_ERROR_ON(_weights->info()->dimension(1) != 3 || _weights->info()->dimension(2) != 3);

    _convolver = create_convolver_object(_input->info()->tensor_shape(), _conv_info,
                                         _weights->buffer(), _input->buffer(), _output->buffer());
}

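// Generic-path setup: derive the output shape and data type, translate the convolution padding
// into the kernel border size, and size the access windows from the number of elements
// processed per iteration (e.g. for F32 with stride 1, 12 input elements are read and
// 16 >> 1 = 8 outputs are written per step).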
void NEDepthwiseConvolutionLayer3x3Kernel::configure_generic()
{
    ARM_COMPUTE_ERROR_ON(_weights->info()->dimension(0) != 3 || _weights->info()->dimension(1) != 3);

    // Get convolved dimensions
    const TensorShape output_shape = compute_depthwise_convolution_shape(*_input->info(), *_weights->info(), _conv_info);
    const DataType    output_dt    = (_input->info()->data_type() == DataType::QASYMM8) ? DataType::S32 : _input->info()->data_type();

    // Output auto-initialization if not yet initialized
    auto_init_if_empty(*_output->info(),
                       _input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(output_shape).set_data_type(output_dt));

    ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(_output->info()->tensor_shape(), output_shape);

    const unsigned int conv_stride_x   = _conv_info.stride().first;
    const unsigned int conv_pad_top    = _conv_info.pad_top();
    const unsigned int conv_pad_right  = _conv_info.pad_right();
    const unsigned int conv_pad_bottom = _conv_info.pad_bottom();
    const unsigned int conv_pad_left   = _conv_info.pad_left();

    ARM_COMPUTE_ERROR_ON(conv_stride_x < 1 || conv_stride_x > 3);

    unsigned int num_elems_read_per_iteration = 0;
    switch(_input->info()->data_type())
    {
        case DataType::QASYMM8:
            num_elems_read_per_iteration     = 16;
            _num_elems_written_per_iteration = 16 >> conv_stride_x;
            break;
        case DataType::F32:
            num_elems_read_per_iteration     = 12;
            _num_elems_written_per_iteration = 16 >> conv_stride_x;
            break;
        default:
            ARM_COMPUTE_ERROR("Data type not supported.");
    }
    _border_size = BorderSize(conv_pad_top, conv_pad_right, conv_pad_bottom, conv_pad_left);

    // Configure kernel window
    Window win = calculate_max_window(*_output->info(), Steps(_num_elems_written_per_iteration));

    const unsigned int num_x_steps               = (output_shape.x() + _num_elems_written_per_iteration - 1) / _num_elems_written_per_iteration;
    const int          input_num_elems_processed = get_input_num_elems_processed(_num_elems_written_per_iteration, conv_stride_x);

    AccessWindowStatic input_access(_input->info(),
                                    -conv_pad_left,
                                    -conv_pad_top,
                                    (num_x_steps - 1) * input_num_elems_processed + num_elems_read_per_iteration,
                                    _input->info()->tensor_shape().y() + conv_pad_bottom);
    AccessWindowStatic     weights_access(_weights->info(), 0, 0, _weights->info()->dimension(0), _weights->info()->dimension(1));
    AccessWindowHorizontal output_access(_output->info(), 0, _num_elems_written_per_iteration);

    update_window_and_padding(win, input_access, weights_access, output_access);
    output_access.set_valid_region(win, ValidRegion(Coordinates(), _output->info()->tensor_shape()));

    INEKernel::configure(win);
}

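// Optimized-path setup: no border is needed, the output width and height are queried from the
// convolver itself, and the kernel window is a flat 1D range over the convolver's internal
// work items (consumed by run_optimized() below).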
void NEDepthwiseConvolutionLayer3x3Kernel::configure_optimized()
{
    ARM_COMPUTE_ERROR_ON(_weights->info()->dimension(1) != 3 || _weights->info()->dimension(2) != 3);

    _border_size = BorderSize(0, 0);
    _convolver   = create_convolver_object(_input->info()->tensor_shape(), _conv_info,
                                           _weights->buffer(), _input->buffer(), _output->buffer());

    // Auto-configure output
    bool        same_padding = _conv_info.has_padding();
    TensorShape output_shape{ _input->info()->tensor_shape() };

    output_shape.set(1, _convolver->output_size(output_shape.y(), same_padding)); // Set width
    output_shape.set(2, _convolver->output_size(output_shape.z(), same_padding)); // Set height

    // Output auto-initialization if not yet initialized
    auto_init_if_empty(*_output->info(),
                       _input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(output_shape));

    // Configure window
    Window win;
    auto   win_last = _convolver->get_window();
    win.set(Window::DimX, Window::Dimension(0, win_last, 1));
    INEKernel::configure(win);
}

void NEDepthwiseConvolutionLayer3x3Kernel::run_generic(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);

    switch(_input->info()->data_type())
    {
        case DataType::F32:
            convolve_3x3<float, float>(window, _num_elems_written_per_iteration, _input, _weights, _output, _conv_info);
            break;
        case DataType::QASYMM8:
            convolve_3x3<uint8_t, int32_t>(window, _num_elems_written_per_iteration, _input, _weights, _output, _conv_info);
            break;
        default:
            ARM_COMPUTE_ERROR("Not implemented");
    }
}

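// The scheduler splits the 1D window configured in configure_optimized() across threads; each
// invocation simply forwards its [start, end) slice of work items to the convolver.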
void NEDepthwiseConvolutionLayer3x3Kernel::run_optimized(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON(!_convolver);

    const size_t start = window.x().start();
    const size_t end   = window.x().end();
    _convolver->run(start, end);
}

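// Builds the external depthwise convolver used by the optimized path. Only F32 with stride 1
// or 2 is handled here (matching is_optimized_execution_possible()); other strides return
// nullptr. The leading 2, 2 template arguments are assumed to select the convolver's output
// tile size, while the 3, 3 and the stride pair describe the kernel geometry.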
std::unique_ptr<depthwise::IDepthwiseConvolution> NEDepthwiseConvolutionLayer3x3Kernel::create_convolver_object(TensorShape    shape,
                                                                                                                PadStrideInfo  conv_info,
                                                                                                                const uint8_t *w_ptr,
                                                                                                                uint8_t       *in_ptr,
                                                                                                                uint8_t       *out_ptr)
{
    const int  in_rows      = shape.z();
    const int  in_cols      = shape.y();
    const int  n_batches    = shape[3];
    const int  n_channels   = shape.x();
    const bool padding_same = conv_info.has_padding();

    const auto stride_x = conv_info.stride().first;
    switch(stride_x)
    {
        case 1:
            return arm_compute::support::cpp14::make_unique<DepthwiseConvolution<2, 2, 3, 3, 1, 1, float, float>>(
                       n_batches,
                       in_rows,
                       in_cols,
                       n_channels,
                       padding_same,
                       reinterpret_cast<const float *>(w_ptr),
                       reinterpret_cast<float *>(in_ptr),
                       reinterpret_cast<float *>(out_ptr));
        case 2:
            return arm_compute::support::cpp14::make_unique<DepthwiseConvolution<2, 2, 3, 3, 2, 2, float, float>>(
                       n_batches,
                       in_rows,
                       in_cols,
                       n_channels,
                       padding_same,
                       reinterpret_cast<const float *>(w_ptr),
                       reinterpret_cast<float *>(in_ptr),
                       reinterpret_cast<float *>(out_ptr));
        default:
            return nullptr;
    }
}