/*
 * Copyright (c) 2019-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEFFTConvolutionLayer.h"

#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/Validate.h"

#include "src/common/utils/Log.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/NEON/kernels/NEFFTDigitReverseKernel.h"
#include "src/core/NEON/kernels/NEFFTRadixStageKernel.h"
#include "src/core/NEON/kernels/NEFFTScaleKernel.h"
#include "src/core/NEON/kernels/NEPadLayerKernel.h"
#include "src/core/NEON/kernels/NEReductionOperationKernel.h"
#include "src/core/utils/helpers/fft.h"

namespace arm_compute
{
namespace
{
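// Returns how much extra padding is needed so that (N + pad) can be decomposed into the radix
// factors supported by NEFFTRadixStageKernel, i.e. the smallest pad that makes an FFT of that
// length possible.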
int pad_decomposable(int N)
{
    const auto supported_radix = NEFFTRadixStageKernel::supported_radix();

    int  pad           = 0;
    bool is_decomposed = false;
    while (!is_decomposed)
    {
        const auto decomposed_vector = arm_compute::helpers::fft::decompose_stages(N++, supported_radix);
        is_decomposed                = !decomposed_vector.empty();
        if (!is_decomposed)
        {
            ++pad;
        }
    }
    return pad;
}
} // namespace

NEFFTConvolutionLayer::NEFFTConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(memory_manager),
      _flip_weights_func(),
      _permute_input_func(),
      _permute_output_func(),
      _permute_weights_func(),
      _permute_bias_func(),
      _pad_input_func(),
      _pad_weights_func(),
      _transform_input_func(memory_manager),
      _transform_weights_func(),
      _itransform_output_func(memory_manager),
      _prod_func(),
      _reduce_func(),
      _extract_output_func(),
      _bias_add_func(),
      _activation_layer_func(),
      _permuted_input(),
      _permuted_weights(),
      _permuted_bias(),
      _permuted_output(),
      _padded_input(),
      _padded_weights(),
      _flip_axis(),
      _flipped_weights(),
      _transformed_input(),
      _transformed_weights(),
      _input_weights_product(),
      _output_product(),
      _output_reduced(),
      _itransformed_output(),
      _reshaped_output(),
      _bias_output(),
      _original_weights(nullptr),
      _original_bias(nullptr),
      _is_activationlayer_enabled(false),
      _needs_permute(false),
      _has_bias(false),
      _is_prepared(false)
{
}
NEFFTConvolutionLayer::~NEFFTConvolutionLayer() = default;

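// FFT-based convolution: the (flipped) weights and the input are zero-padded to a common,
// FFT-friendly size, transformed with a 2D FFT, multiplied element-wise in the frequency domain
// and summed over the input channels; an inverse FFT and a crop of the valid region then produce
// the spatial-domain output.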
void NEFFTConvolutionLayer::configure(ITensor                   *input,
                                      const ITensor             *weights,
                                      const ITensor             *biases,
                                      ITensor                   *output,
                                      const PadStrideInfo       &conv_info,
                                      const ActivationLayerInfo &act_info,
                                      bool                       enable_fast_math)
{
    ARM_COMPUTE_UNUSED(enable_fast_math);
    ARM_COMPUTE_LOG_PARAMS(input, weights, biases, output, conv_info, act_info, enable_fast_math);

    _original_weights = weights;
    _original_bias    = biases;

    // Flag if bias addition is required
    _has_bias = biases != nullptr;

    // Get indices for the width and height
    const size_t idx_width  = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_height =
        get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT);

    // Input shape, kernel size and output tile
    const Size2D input_dims =
        Size2D(input->info()->tensor_shape()[idx_width], input->info()->tensor_shape()[idx_height]);
    const Size2D kernel_size =
        Size2D(weights->info()->tensor_shape()[idx_width], weights->info()->tensor_shape()[idx_height]);
    const Size2D pad_valid = Size2D(pad_decomposable(input_dims.x() + kernel_size.x() - 1),
                                    pad_decomposable(input_dims.y() + kernel_size.y() - 1));
    // Tensors to use
    ITensor       *input_to_use   = input;
    const ITensor *weights_to_use = weights;
    ITensor       *output_to_use  = _has_bias ? &_bias_output : output;

    // Permute bias
    if (biases != nullptr)
    {
        _permute_bias_func.configure(biases, &_permuted_bias, PermutationVector(1U, 2U, 0U));
        _permuted_bias.info()->set_data_layout(DataLayout::NCHW);
    }

    // Permute input if needed
    _needs_permute = input->info()->data_layout() == DataLayout::NHWC;
    if (_needs_permute)
    {
        _memory_group.manage(&_permuted_input);
        // Configure the function to transform the input tensor from NHWC -> NCHW
        _permute_input_func.configure(input, &_permuted_input, PermutationVector(1U, 2U, 0U));
        _permuted_input.info()->set_data_layout(DataLayout::NCHW);

        // Configure the function to transform the weights tensor from HWI -> IHW
        _permute_weights_func.configure(weights, &_permuted_weights, PermutationVector(1U, 2U, 0U));
        _permuted_weights.info()->set_data_layout(DataLayout::NCHW);

        input_to_use   = &_permuted_input;
        weights_to_use = &_permuted_weights;
    }

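    // Flipping the kernel spatially turns the layer's cross-correlation into a true convolution,
    // which is what the frequency-domain product below implements.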
    // Flip weights
    _flipped_weights.allocator()->init(weights_to_use->info()->clone()->set_is_resizable(true).reset_padding());
    _flip_axis.allocator()->init(TensorInfo(TensorShape(2U), 1, DataType::U32));
    _flip_weights_func.configure(weights_to_use, &_flipped_weights, &_flip_axis);

    // Pad weights
    const PaddingList padding_w = {{0, input_dims.x() + pad_valid.x() - 1}, {0, input_dims.y() + pad_valid.y() - 1}};
    _pad_weights_func.configure(&_flipped_weights, &_padded_weights, padding_w);

    // Transform weights
    _transform_weights_func = std::make_unique<NEFFT2D>();
    _transform_weights_func->configure(&_padded_weights, &_transformed_weights, FFT2DInfo());

    // Pad input
    const PaddingList padding_in = {{0, kernel_size.x() + pad_valid.x() - 1}, {0, kernel_size.y() + pad_valid.y() - 1}};
    _memory_group.manage(&_padded_input);
    _pad_input_func.configure(input_to_use, &_padded_input, padding_in);
    if (_needs_permute)
    {
        _permuted_input.allocator()->allocate();
    }

    // Transform input
    _memory_group.manage(&_transformed_input);
    _transform_input_func.configure(&_padded_input, &_transformed_input, FFT2DInfo());
    _padded_input.allocator()->allocate();

    // Perform product
    _memory_group.manage(&_output_product);
    _prod_func.configure(&_transformed_input, &_transformed_weights, &_output_product);
    _transformed_input.allocator()->allocate();

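    // Summing over dimension 2 (the channel axis in NCHW) accumulates the per-input-channel
    // products into a single frequency-domain map per output channel.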
    // Perform reduction
    _memory_group.manage(&_output_reduced);
    _reduce_func.configure(&_output_product, &_output_reduced, 2, ReductionOperation::SUM);
    _output_product.allocator()->allocate();

    // Transform output
    _memory_group.manage(&_itransformed_output);
    FFT2DInfo itransform_info;
    itransform_info.direction = FFTDirection::Inverse;
    _itransformed_output.allocator()->init(
        _output_reduced.info()->clone()->set_is_resizable(true).set_num_channels(1).reset_padding());
    _itransform_output_func.configure(&_output_reduced, &_itransformed_output, itransform_info);
    _output_reduced.allocator()->allocate();

    // Reshape output
    TensorShape reshaped_shape = _itransformed_output.info()->tensor_shape();
    reshaped_shape.remove_dimension(2);
    _reshaped_output.allocator()->init(_itransformed_output.info()->clone()->set_tensor_shape(reshaped_shape));

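    // The padded convolution produces a larger-than-requested result; the slice below keeps only
    // the region corresponding to the requested output size, dropping the border introduced by the
    // kernel and the extra decomposability padding.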
    // Extract correct region
    const int start_left = kernel_size.x() - conv_info.pad_left() - 1;
    const int start_top  = kernel_size.y() - conv_info.pad_top() - 1;
    const int end_right =
        _reshaped_output.info()->tensor_shape().x() - (kernel_size.x() - conv_info.pad_right() - 1) - pad_valid.x();
    const int end_bottom =
        _reshaped_output.info()->tensor_shape().y() - (kernel_size.y() - conv_info.pad_bottom() - 1) - pad_valid.y();
    if (_has_bias)
    {
        _memory_group.manage(&_bias_output);
    }
    else if (_needs_permute)
    {
        output_to_use = &_permuted_output;
        _memory_group.manage(&_permuted_output);
    }
    _extract_output_func.configure(&_reshaped_output, output_to_use, Coordinates(start_left, start_top),
                                   Coordinates(end_right, end_bottom));
    _reshaped_output.allocator()->allocate();
    _itransformed_output.allocator()->allocate();

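    // When a bias is present the extracted result is written to the intermediate _bias_output
    // tensor and the bias is added into the (possibly still permuted) output below.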
    // Add bias
    if (biases != nullptr)
    {
        output_to_use = output;
        if (_needs_permute)
        {
            output_to_use = &_permuted_output;
            _memory_group.manage(&_permuted_output);
        }
        auto_init_if_empty(*output_to_use->info(), *_bias_output.info());
        _bias_add_func.configure(&_bias_output, &_permuted_bias, output_to_use, ConvertPolicy::WRAP);
        _bias_output.allocator()->allocate();
    }

    // Permute output
    if (_needs_permute)
    {
        // Configure the function to transform the convolved output back to the original NHWC layout
        _permuted_output.info()->set_data_layout(DataLayout::NCHW);
        _permute_output_func.configure(&_permuted_output, output, PermutationVector(2U, 0U, 1U));

        // Allocate tensors
        _permuted_output.allocator()->allocate();
    }

    // Configure Activation Layer
    _is_activationlayer_enabled = act_info.enabled();
    if (_is_activationlayer_enabled)
    {
        _activation_layer_func.configure(output, nullptr, act_info);
    }

    // Setup flip axis data
    _flip_axis.allocator()->allocate();

    auto axis_data = reinterpret_cast<uint32_t *>(_flip_axis.buffer());
    axis_data[0]   = 0;
    axis_data[1]   = 1;
}

Status NEFFTConvolutionLayer::validate(const ITensorInfo         *input,
                                       const ITensorInfo         *weights,
                                       const ITensorInfo         *biases,
                                       const ITensorInfo         *output,
                                       const PadStrideInfo       &conv_info,
                                       const ActivationLayerInfo &act_info,
                                       bool                       enable_fast_math)
{
    ARM_COMPUTE_UNUSED(enable_fast_math);

    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);

    // Get indices for the width and height
    const size_t idx_width  = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_height = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);

    // Input shape, kernel size and output tile
    const Size2D kernel_size = Size2D(weights->tensor_shape()[idx_width], weights->tensor_shape()[idx_height]);

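    // The FFT path only handles square kernels with 'same' padding (pad == kernel_size / 2),
    // which the checks below enforce alongside the stride constraints.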
    // Strides
    const auto strides = conv_info.stride();
    ARM_COMPUTE_RETURN_ERROR_ON(strides.first != strides.second && strides.first != 1);
    ARM_COMPUTE_RETURN_ERROR_ON(kernel_size.x() != kernel_size.y());
    ARM_COMPUTE_RETURN_ERROR_ON(conv_info.pad_left() != (kernel_size.x() / 2) ||
                                conv_info.pad_right() != (kernel_size.x() / 2));
    ARM_COMPUTE_RETURN_ERROR_ON(conv_info.pad_top() != (kernel_size.y() / 2) ||
                                conv_info.pad_bottom() != (kernel_size.y() / 2));

    // Validate biases
    if (biases != nullptr)
    {
        const size_t idx_channels = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_channels] != biases->tensor_shape().x());
    }

    // Checks performed when output is configured
    if ((output != nullptr) && (output->total_size() != 0))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON((input->tensor_shape()[idx_height] != output->tensor_shape()[idx_height]) ||
                                    (input->tensor_shape()[idx_width] != output->tensor_shape()[idx_width]));

        // Validate Activation Layer
        if (act_info.enabled())
        {
            ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, act_info));
        }
    }

    return Status{};
}

void NEFFTConvolutionLayer::run()
{
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);

    // Transform input
    if (_needs_permute)
    {
        _permute_input_func.run();
    }
    _pad_input_func.run();
    _transform_input_func.run();

    // Perform operations in the frequency domain
    _prod_func.run();

    _reduce_func.run();

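    // _reshaped_output aliases the inverse-transform buffer via import_memory(), so dropping the
    // extra dimension before the crop does not copy any data.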
    // Transform output
    _itransform_output_func.run();
    _reshaped_output.allocator()->import_memory(_itransformed_output.buffer());
    _extract_output_func.run();

    // Add bias
    if (_has_bias)
    {
        _bias_add_func.run();
    }
    if (_needs_permute)
    {
        _permute_output_func.run();
    }

    // Run activation layer
    if (_is_activationlayer_enabled)
    {
        _activation_layer_func.run();
    }
}

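// One-time weight preparation, guarded by _is_prepared: the weights are permuted, flipped, padded
// and transformed to the frequency domain, and every intermediate weight tensor is released
// afterwards.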
void NEFFTConvolutionLayer::prepare()
{
    if (!_is_prepared)
    {
        // Permute bias to NCHW
        if (_original_bias != nullptr)
        {
            _permuted_bias.allocator()->allocate();
            _permute_bias_func.run();
            _original_bias->mark_as_unused();
        }

        const ITensor *cur_weights = _original_weights;

        // Permute weights
        if (_needs_permute)
        {
            ARM_COMPUTE_ERROR_ON(!cur_weights->is_used());

            _permuted_weights.allocator()->allocate();
            _permute_weights_func.run();
            cur_weights->mark_as_unused();
            cur_weights = &_permuted_weights;
        }

        // Flip weights
        _flipped_weights.allocator()->allocate();
        _flip_weights_func.run();
        cur_weights->mark_as_unused();

        // Pad weights
        _padded_weights.allocator()->allocate();
        _pad_weights_func.run();
        _flipped_weights.mark_as_unused();
        _flipped_weights.allocator()->free();

        // Transform weights to frequency domain
        _transformed_weights.allocator()->allocate();
        _transform_weights_func->run();
        _transform_weights_func.reset();

        _padded_weights.mark_as_unused();
        _padded_weights.allocator()->free();

        _is_prepared = true;
    }
}
} // namespace arm_compute