/*
 * Copyright (c) 2019-2021, 2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h"

#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CPP/CPPScheduler.h"

#include "src/common/utils/Log.h"
#include "src/core/CL/kernels/CLFFTDigitReverseKernel.h"
#include "src/core/CL/kernels/CLFFTRadixStageKernel.h"
#include "src/core/CL/kernels/CLFFTScaleKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLPadLayerKernel.h"
#include "src/core/CL/kernels/CLReductionOperationKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/utils/helpers/fft.h"

namespace arm_compute
{
namespace
{
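// Calculates how much extra padding must be added on top of N so that the resulting length can be
// decomposed into the radix stages supported by CLFFTRadixStageKernel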
int pad_decomposable(int N)
{
    const auto supported_radix = CLFFTRadixStageKernel::supported_radix();

    int  pad           = 0;
    bool is_decomposed = false;
    while (!is_decomposed)
    {
        const auto decomposed_vector = arm_compute::helpers::fft::decompose_stages(N++, supported_radix);
        is_decomposed                = !decomposed_vector.empty();
        if (!is_decomposed)
        {
            ++pad;
        }
    }
    return pad;
}
} // namespace
CLFFTConvolutionLayer::CLFFTConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(memory_manager),
      _flip_weights_func(),
      _permute_input_func(),
      _permute_output_func(),
      _permute_weights_func(),
      _permute_bias_func(),
      _pad_input_func(),
      _pad_weights_func(),
      _transform_input_func(memory_manager),
      _transform_weights_func(),
      _itransform_output_func(memory_manager),
      _prod_func(),
      _reduce_func(),
      _extract_output_func(),
      _bias_add_func(),
      _activation_layer_func(),
      _permuted_input(),
      _permuted_weights(),
      _permuted_bias(),
      _permuted_output(),
      _padded_input(),
      _padded_weights(),
      _flip_axis(),
      _flipped_weights(),
      _transformed_input(),
      _transformed_weights(),
      _input_weights_product(),
      _output_product(),
      _output_reduced(),
      _itransformed_output(),
      _reshaped_output(),
      _bias_output(),
      _original_weights(nullptr),
      _original_bias(nullptr),
      _is_activationlayer_enabled(false),
      _needs_permute(false),
      _has_bias(false),
      _is_prepared(false)
{
}

void CLFFTConvolutionLayer::configure(ICLTensor                 *input,
                                      const ICLTensor           *weights,
                                      const ICLTensor           *biases,
                                      ICLTensor                 *output,
                                      const PadStrideInfo       &conv_info,
                                      const ActivationLayerInfo &act_info,
                                      bool                       enable_fast_math)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, act_info,
              enable_fast_math);
}

void CLFFTConvolutionLayer::configure(const CLCompileContext    &compile_context,
                                      ICLTensor                 *input,
                                      const ICLTensor           *weights,
                                      const ICLTensor           *biases,
                                      ICLTensor                 *output,
                                      const PadStrideInfo       &conv_info,
                                      const ActivationLayerInfo &act_info,
                                      bool                       enable_fast_math)
{
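    // FFT-based convolution: the input and the flipped weights are padded to a common FFT-friendly
    // size, transformed, multiplied in the frequency domain and reduced over the input channels;
    // the inverse transform is then cropped to the expected output region. NHWC tensors are
    // permuted to NCHW internally and permuted back at the end.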
    ARM_COMPUTE_UNUSED(enable_fast_math);
    ARM_COMPUTE_ERROR_THROW_ON(CLFFTConvolutionLayer::validate(input->info(), weights->info(),
                                                               biases != nullptr ? biases->info() : nullptr,
                                                               output->info(), conv_info, act_info, enable_fast_math));
    ARM_COMPUTE_LOG_PARAMS(input, weights, biases, output, conv_info, act_info, enable_fast_math);

    _original_weights = weights;
    _original_bias    = biases;

    // Flag if bias addition is required
    _has_bias = biases != nullptr;

    // Get indices for the width and height
    const size_t idx_width  = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_height =
        get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT);

    // Input shape, kernel size and output tile
    const Size2D input_dims =
        Size2D(input->info()->tensor_shape()[idx_width], input->info()->tensor_shape()[idx_height]);
    const Size2D kernel_size =
        Size2D(weights->info()->tensor_shape()[idx_width], weights->info()->tensor_shape()[idx_height]);
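    // A linear (non-circular) convolution via FFT needs transforms of at least input + kernel - 1
    // samples per spatial dimension; pad_decomposable() adds whatever extra is required to make that
    // length factorizable into the supported radix stages.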
    const Size2D pad_valid = Size2D(pad_decomposable(input_dims.x() + kernel_size.x() - 1),
                                    pad_decomposable(input_dims.y() + kernel_size.y() - 1));
    // Tensors to use
    ICLTensor       *input_to_use   = input;
    const ICLTensor *weights_to_use = weights;
    ICLTensor       *output_to_use  = _has_bias ? &_bias_output : output;

    // Permute bias
    if (biases != nullptr)
    {
        _permute_bias_func.configure(compile_context, biases, &_permuted_bias, PermutationVector(1U, 2U, 0U));
        _permuted_bias.info()->set_data_layout(DataLayout::NCHW);
    }

    // Permute input if needed
    _needs_permute = input->info()->data_layout() == DataLayout::NHWC;
    if (_needs_permute)
    {
        _memory_group.manage(&_permuted_input);
        // Configure the function to transform the input tensor from NHWC -> NCHW
        _permute_input_func.configure(compile_context, input, &_permuted_input, PermutationVector(1U, 2U, 0U));
        _permuted_input.info()->set_data_layout(DataLayout::NCHW);

        // Configure the function to transform the weights tensor from HWI -> IHW
        _permute_weights_func.configure(compile_context, weights, &_permuted_weights, PermutationVector(1U, 2U, 0U));
        _permuted_weights.info()->set_data_layout(DataLayout::NCHW);

        input_to_use   = &_permuted_input;
        weights_to_use = &_permuted_weights;
    }

181 // Flip weights
    _flipped_weights.allocator()->init(weights_to_use->info()->clone()->set_is_resizable(true).reset_padding());
    _flip_axis.allocator()->init(TensorInfo(TensorShape(2U), 1, DataType::U32));
    _flip_weights_func.configure(compile_context, weights_to_use, &_flipped_weights, &_flip_axis,
                                 /* use_inverted_axis */ false);

    // Pad weights
    const PaddingList padding_w = {{0, input_dims.x() + pad_valid.x() - 1}, {0, input_dims.y() + pad_valid.y() - 1}};
    _pad_weights_func.configure(compile_context, &_flipped_weights, &_padded_weights, padding_w);

    // Transform weights
    _transform_weights_func = std::make_unique<CLFFT2D>();
    _transform_weights_func->configure(compile_context, &_padded_weights, &_transformed_weights, FFT2DInfo());

    // Pad input
    const PaddingList padding_in = {{0, kernel_size.x() + pad_valid.x() - 1}, {0, kernel_size.y() + pad_valid.y() - 1}};
    _memory_group.manage(&_padded_input);
    _pad_input_func.configure(compile_context, input_to_use, &_padded_input, padding_in);
    if (_needs_permute)
    {
        _permuted_input.allocator()->allocate();
    }

    // Transform input
    _memory_group.manage(&_transformed_input);
    _transform_input_func.configure(compile_context, &_padded_input, &_transformed_input, FFT2DInfo());
    _padded_input.allocator()->allocate();

    // Perform product
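    // An element-wise complex multiplication in the frequency domain is equivalent to a spatial
    // convolution; summing the products over dimension 2 (the input channels) then produces each
    // output feature map.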
    _memory_group.manage(&_output_product);
    _prod_func.configure(compile_context, &_transformed_input, &_transformed_weights, &_output_product);
    _transformed_input.allocator()->allocate();

    // Perform reduction
    _memory_group.manage(&_output_reduced);
    _reduce_func.configure(compile_context, &_output_product, &_output_reduced, 2, ReductionOperation::SUM);
    _output_product.allocator()->allocate();

    // Transform output
    _memory_group.manage(&_itransformed_output);
    FFT2DInfo itransform_info;
    itransform_info.direction = FFTDirection::Inverse;
    _itransformed_output.allocator()->init(
        _output_reduced.info()->clone()->set_is_resizable(true).set_num_channels(1).reset_padding());
    _itransform_output_func.configure(compile_context, &_output_reduced, &_itransformed_output, itransform_info);
    _output_reduced.allocator()->allocate();

    // Reshape output
    TensorShape reshaped_shape = _itransformed_output.info()->tensor_shape();
    reshaped_shape.remove_dimension(2);
    _reshaped_output.allocator()->init(_itransformed_output.info()->clone()->set_tensor_shape(reshaped_shape));

    // Extract correct region
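    // The inverse transform holds the full convolution, borders included; the coordinates below crop
    // out the region that matches the requested output size and padding.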
    const int start_left = kernel_size.x() - conv_info.pad_left() - 1;
    const int start_top  = kernel_size.y() - conv_info.pad_top() - 1;
    const int end_right =
        _reshaped_output.info()->tensor_shape().x() - (kernel_size.x() - conv_info.pad_right() - 1) - pad_valid.x();
    const int end_bottom =
        _reshaped_output.info()->tensor_shape().y() - (kernel_size.y() - conv_info.pad_bottom() - 1) - pad_valid.y();
    if (_has_bias)
    {
        _memory_group.manage(&_bias_output);
    }
    else if (_needs_permute)
    {
        output_to_use = &_permuted_output;
        _memory_group.manage(&_permuted_output);
    }
    _extract_output_func.configure(compile_context, &_reshaped_output, output_to_use,
                                   Coordinates(start_left, start_top), Coordinates(end_right, end_bottom));
    _itransformed_output.allocator()->allocate();

    // Add bias
    if (biases != nullptr)
    {
        output_to_use = output;
        if (_needs_permute)
        {
            output_to_use = &_permuted_output;
            _memory_group.manage(&_permuted_output);
        }
        auto_init_if_empty(*output_to_use->info(), *_bias_output.info());
        _bias_add_func.configure(compile_context, &_bias_output, &_permuted_bias, output_to_use, ConvertPolicy::WRAP);
        _bias_output.allocator()->allocate();
    }

    // Permute output
    if (_needs_permute)
    {
        // Configure the function to permute the convolved output back to the original NHWC layout
        _permuted_output.info()->set_data_layout(DataLayout::NCHW);
        _permute_output_func.configure(compile_context, &_permuted_output, output, PermutationVector(2U, 0U, 1U));

        // Allocate tensors
        _permuted_output.allocator()->allocate();
    }

    // Configure Activation Layer
    _is_activationlayer_enabled = act_info.enabled();
    if (_is_activationlayer_enabled)
    {
        _activation_layer_func.configure(compile_context, output, nullptr, act_info);
    }

    // Setup flip axis data
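    // The reverse kernel flips dimensions 0 and 1, i.e. the width and height of the weights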
    _flip_axis.allocator()->allocate();
    _flip_axis.map(true);
    auto axis_data = reinterpret_cast<uint32_t *>(_flip_axis.buffer());
    axis_data[0]   = 0;
    axis_data[1]   = 1;
    _flip_axis.unmap();
}

Status CLFFTConvolutionLayer::validate(const ITensorInfo         *input,
                                       const ITensorInfo         *weights,
                                       const ITensorInfo         *biases,
                                       const ITensorInfo         *output,
                                       const PadStrideInfo       &conv_info,
                                       const ActivationLayerInfo &act_info,
                                       bool                       enable_fast_math)
{
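    // The FFT path only supports unit strides, square kernels and 'same' padding; F16 is accepted
    // only when fast math is enabled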
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON((input->data_type() == DataType::F16) && !enable_fast_math);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);

    // Get indices for the width and height
    const size_t idx_width  = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_height = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);

    // Input shape, kernel size and output tile
    const Size2D kernel_size = Size2D(weights->tensor_shape()[idx_width], weights->tensor_shape()[idx_height]);

    // Strides
    const auto strides = conv_info.stride();
    ARM_COMPUTE_RETURN_ERROR_ON(strides.first != strides.second || strides.first != 1);
    ARM_COMPUTE_RETURN_ERROR_ON(kernel_size.x() != kernel_size.y());
    ARM_COMPUTE_RETURN_ERROR_ON(conv_info.pad_left() != (kernel_size.x() / 2) ||
                                conv_info.pad_right() != (kernel_size.x() / 2));
    ARM_COMPUTE_RETURN_ERROR_ON(conv_info.pad_top() != (kernel_size.y() / 2) ||
                                conv_info.pad_bottom() != (kernel_size.y() / 2));

    // Validate biases
    if (biases != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        ARM_COMPUTE_RETURN_ERROR_ON(weights->tensor_shape()[3] != biases->tensor_shape().x());
    }

    // Checks performed when output is configured
    if ((output != nullptr) && (output->total_size() != 0))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON((input->tensor_shape()[idx_height] != output->tensor_shape()[idx_height]) ||
                                    (input->tensor_shape()[idx_width] != output->tensor_shape()[idx_width]));

        // Validate Activation Layer
        if (act_info.enabled())
        {
            ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayer::validate(output, nullptr, act_info));
        }
    }

    return Status{};
}

void CLFFTConvolutionLayer::run()
{
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);

    // Transform input
    if (_needs_permute)
    {
        _permute_input_func.run();
    }
    _pad_input_func.run();
    _transform_input_func.run();

    // Perform operations in the frequency domain
    _prod_func.run();
    _reduce_func.run();

    // Transform output
    _itransform_output_func.run();
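    // The reshaped output only re-interprets the inverse-transform result, so its buffer is aliased
    // via import_memory() instead of being copied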
    _reshaped_output.allocator()->import_memory(_itransformed_output.cl_buffer());
    _extract_output_func.run();
    // Add bias
    if (_has_bias)
    {
        _bias_add_func.run();
    }
    if (_needs_permute)
    {
        _permute_output_func.run();
    }

    // Run activation layer
    if (_is_activationlayer_enabled)
    {
        _activation_layer_func.run();
    }
}

void CLFFTConvolutionLayer::prepare()
{
    if (!_is_prepared)
    {
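        // One-shot weight preparation: permute, flip, pad and transform the weights, releasing each
        // intermediate buffer once the next stage has consumed it, so that only the frequency-domain
        // weights stay resident for run().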
        // Permute bias to NCHW
        if (_original_bias != nullptr)
        {
            _permuted_bias.allocator()->allocate();
            _permute_bias_func.run();
            _original_bias->mark_as_unused();
        }

        const ICLTensor *cur_weights = _original_weights;
        // Permute weights
        if (_needs_permute)
        {
            ARM_COMPUTE_ERROR_ON(!cur_weights->is_used());

            _permuted_weights.allocator()->allocate();
            _permute_weights_func.run();
            cur_weights->mark_as_unused();
            cur_weights = &_permuted_weights;
        }

        // Flip weights
        _flipped_weights.allocator()->allocate();
        _flip_weights_func.run();
        cur_weights->mark_as_unused();

        // Pad weights
        _padded_weights.allocator()->allocate();
        _pad_weights_func.run();
        _flipped_weights.mark_as_unused();
        CLScheduler::get().queue().finish();
        _flipped_weights.allocator()->free();

        // Transform weights to frequency domain
        _transformed_weights.allocator()->allocate();
        _transform_weights_func->run();
        _padded_weights.mark_as_unused();
        CLScheduler::get().queue().finish();
        // Delete object and release internal memory
        _transform_weights_func.reset();
        _padded_weights.allocator()->free();

        _is_prepared = true;
    }
}
} // namespace arm_compute