blob: a040e9d38e5b09596b7c7a145ad66a4fb8deaa8a [file] [log] [blame]
giuros0146a49a02019-04-01 13:50:22 +01001/*
Michele Di Giorgiod9eaf612020-07-08 11:12:57 +01002 * Copyright (c) 2019-2020 Arm Limited.
giuros0146a49a02019-04-01 13:50:22 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#include "arm_compute/runtime/CL/functions/CLGEMMDeconvolutionLayer.h"
25
26#include "arm_compute/core/Helpers.h"
27#include "arm_compute/core/Validate.h"
28#include "arm_compute/core/utils/misc/ShapeCalculator.h"
29#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
30#include "arm_compute/runtime/CL/CLScheduler.h"
Sang-Hoon Parkbef7fa22020-10-21 15:58:54 +010031#include "src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h"
32#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
33#include "src/core/CL/kernels/CLFillBorderKernel.h"
34#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
35#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
36#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h"
37#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h"
38#include "src/core/CL/kernels/CLGEMMLowpReductionKernel.h"
39#include "src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
40#include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h"
41#include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h"
42#include "src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h"
43#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
44#include "src/core/CL/kernels/CLIm2ColKernel.h"
45#include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
giuros0146a49a02019-04-01 13:50:22 +010046
giuros0146a49a02019-04-01 13:50:22 +010047#include <tuple>
48
49namespace arm_compute
50{
51namespace
52{
53std::pair<Coordinates, Coordinates> compute_start_end_slice_coordinates(const ITensorInfo &output_info, const PadStrideInfo &deconv_info, bool is_nchw)
54{
55 Coordinates start;
56 Coordinates end;
57
58 if(is_nchw)
59 {
60 start.set(0, deconv_info.pad_left());
61 start.set(1, deconv_info.pad_top());
62 end.set(0, output_info.dimension(0) - deconv_info.pad_right());
63 end.set(1, output_info.dimension(1) - deconv_info.pad_bottom());
64 }
65 else
66 {
67 start.set(0, 0);
68 start.set(1, deconv_info.pad_left());
69 start.set(2, deconv_info.pad_top());
70
71 end.set(0, output_info.dimension(0));
72 end.set(1, output_info.dimension(1) - deconv_info.pad_right());
73 end.set(2, output_info.dimension(2) - deconv_info.pad_bottom());
74 }
75
76 return { start, end };
77}
Sheri Zhang0cdbda52020-02-25 15:57:21 +000078Status construct_gemmlowp_output_stage(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, GEMMLowpOutputStageInfo &output_stage_info)
79{
Manuel Bottini2b84be52020-04-08 10:15:51 +010080 const auto data_type = input->data_type();
Sheri Zhang0cdbda52020-02-25 15:57:21 +000081
Manuel Bottini2b84be52020-04-08 10:15:51 +010082 if(is_data_type_quantized_asymmetric(data_type))
83 {
84 const UniformQuantizationInfo iq_info = input->quantization_info().uniform();
85 const UniformQuantizationInfo wq_info = weights->quantization_info().uniform();
86 const UniformQuantizationInfo oq_info = output->quantization_info().uniform();
Sheri Zhang0cdbda52020-02-25 15:57:21 +000087
Manuel Bottini2b84be52020-04-08 10:15:51 +010088 float multiplier = iq_info.scale * wq_info.scale / oq_info.scale;
89 int output_multiplier(0);
90 int output_shift(0);
91 ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));
Sheri Zhang0cdbda52020-02-25 15:57:21 +000092
Manuel Bottini2b84be52020-04-08 10:15:51 +010093 output_stage_info.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
94 output_stage_info.gemmlowp_multiplier = output_multiplier;
95 output_stage_info.gemmlowp_shift = output_shift;
96 output_stage_info.gemmlowp_offset = oq_info.offset;
97 const auto min_max_bound = get_min_max(data_type);
98 output_stage_info.gemmlowp_min_bound = (std::get<0>(min_max_bound)).get<int32_t>();
99 output_stage_info.gemmlowp_max_bound = (std::get<1>(min_max_bound)).get<int32_t>();
100 output_stage_info.output_data_type = data_type;
101 }
102 return Status{};
Sheri Zhang0cdbda52020-02-25 15:57:21 +0000103}
104
giuros0146a49a02019-04-01 13:50:22 +0100105} // namespace
106
// Constructor: hand the (optional) memory manager to the internal memory group and
// default-construct every sub-function and intermediate tensor. The deconvolution
// reshape kernel is the only member held through a unique_ptr (allocated here).
CLGEMMDeconvolutionLayer::CLGEMMDeconvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager) // NOLINT
    : _memory_group(std::move(memory_manager)),
      _mm_gemm(),
      _mm_gemmlowp(),
      _gemmlowp_output_stage(),
      _permute_input_to_nhwc(),
      _permute_weights_to_nhwc(),
      _reshape_weights(),
      _transpose_weights(),
      _deconv_reshape(std::make_unique<CLDeconvolutionReshapeOutputKernel>()),
      _slice_gemm(),
      _gemmlowp_final(),
      _reshaped_weights(),
      _reshaped_weights_t(),
      _permuted_input(),
      _permuted_weights(),
      _gemm_output(),
      _slice_gemm_input(),
      _original_weights(),
      _is_prepared(false),
      _padded_input(false),
      _is_nchw(false),
      _is_quantized(false)
{
}
132
// Defaulted out-of-line: here CLDeconvolutionReshapeOutputKernel is a complete type
// (included above), so the unique_ptr member can be destroyed.
CLGEMMDeconvolutionLayer::~CLGEMMDeconvolutionLayer() = default;
134
giuros0146a49a02019-04-01 13:50:22 +0100135Status CLGEMMDeconvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *output, const PadStrideInfo &deconv_info)
136{
137 ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
Sheri Zhang0cdbda52020-02-25 15:57:21 +0000138 ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
giuros0146a49a02019-04-01 13:50:22 +0100139 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
140 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
141
142 DataLayout data_layout = input->data_layout();
143 const bool padded_input = deconv_info.pad_bottom() > 0 || deconv_info.pad_left() > 0 || deconv_info.pad_right() > 0 || deconv_info.pad_top() > 0;
144 const bool is_nchw = input->data_layout() == DataLayout::NCHW;
145 const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());
146
147 const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
148 const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
149 const size_t idx_b = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
150
151 ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_w) != deconv_info.stride().first);
152 ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_h) != deconv_info.stride().second);
153
154 TensorShape nhwc_weights_shape = weights->tensor_shape();
155 TensorShape nhwc_input_shape = input->tensor_shape();
156
157 if(is_nchw)
158 {
159 permute(nhwc_weights_shape, PermutationVector(2, 0, 1));
160 permute(nhwc_input_shape, PermutationVector(2, 0, 1));
161
162 TensorInfo nhwc_input_info = input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(nhwc_input_shape).set_data_layout(DataLayout::NCHW);
163
164 TensorInfo nhwc_weights_info = weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(nhwc_weights_shape).set_data_layout(DataLayout::NCHW);
165
166 CLPermute::validate(weights, &nhwc_weights_info, PermutationVector(2, 0, 1));
167 CLPermute::validate(input, &nhwc_input_info, PermutationVector(2, 0, 1));
168 }
169
170 const TensorShape reshaped_shape = TensorShape(nhwc_weights_shape[0], nhwc_weights_shape[1] * nhwc_weights_shape[2] * nhwc_weights_shape[3]);
171 const TensorInfo reshaped_info = weights->clone()->set_tensor_shape(reshaped_shape).set_data_layout(DataLayout::NCHW).set_is_resizable(true);
172 ARM_COMPUTE_RETURN_ON_ERROR(CLReshapeLayer::validate(weights, &reshaped_info));
173
174 TensorShape transposed_shape(reshaped_shape[1], reshaped_shape[0]);
175 const TensorInfo reshaped_t_info = reshaped_info.clone()->set_is_resizable(true).set_tensor_shape(transposed_shape);
176 ARM_COMPUTE_RETURN_ON_ERROR(CLTranspose::validate(&reshaped_info, &reshaped_t_info));
177
178 TensorShape gemm_output_shape(weights->dimension(idx_w) * weights->dimension(idx_h) * weights->dimension(idx_b),
179 input->dimension(idx_w),
180 input->dimension(idx_h),
181 input->dimension(idx_b));
182
183 TensorInfo gemm_output_info = reshaped_t_info.clone()->set_tensor_shape(gemm_output_shape).set_is_resizable(true);
184 GEMMInfo gemm_info(false, false, true, input->dimension(idx_h), true);
185
Sheri Zhang0cdbda52020-02-25 15:57:21 +0000186 GEMMLowpOutputStageInfo output_stage_info;
187
giuros0146a49a02019-04-01 13:50:22 +0100188 if(is_quantized)
189 {
190 ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyCore::validate(&input->clone()->set_tensor_shape(nhwc_input_shape), &reshaped_t_info, nullptr, &gemm_output_info.set_data_type(DataType::S32),
191 gemm_info));
Sheri Zhang0cdbda52020-02-25 15:57:21 +0000192 ARM_COMPUTE_RETURN_ON_ERROR(construct_gemmlowp_output_stage(input, weights, output, output_stage_info));
giuros0146a49a02019-04-01 13:50:22 +0100193 }
194 else
195 {
196 ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(&input->clone()->set_tensor_shape(nhwc_input_shape).set_is_resizable(true), &reshaped_t_info, nullptr, &gemm_output_info, 1.0f, 0.0f, gemm_info));
197 }
198
Matthew Jacksonb9070a42019-08-22 16:13:27 +0100199 const PadStrideInfo stride_info(deconv_info.stride().first, deconv_info.stride().second);
Michele Di Giorgio14cbfb22019-10-23 10:53:10 +0100200 auto out_dims = deconvolution_output_dimensions(input->dimension(idx_w), input->dimension(idx_h), weights->dimension(idx_w), weights->dimension(idx_h), stride_info);
201 const TensorShape deconv_shape = misc::shape_calculator::compute_deconvolution_output_shape(out_dims, *input, *weights);
202 TensorInfo col2im_output_info = gemm_output_info.clone()->set_tensor_shape(deconv_shape).set_is_resizable(true);
giuros0146a49a02019-04-01 13:50:22 +0100203
204 if(padded_input && is_quantized)
205 {
206 const auto start_end = compute_start_end_slice_coordinates(col2im_output_info, deconv_info, is_nchw);
207 ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, &col2im_output_info, input, weights, deconv_info));
Sheri Zhang0cdbda52020-02-25 15:57:21 +0000208 ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOutputStage::validate(&col2im_output_info, nullptr, &col2im_output_info.clone()->set_is_resizable(true).set_data_type(input->data_type()), output_stage_info));
209 ARM_COMPUTE_RETURN_ON_ERROR(CLSlice::validate(&col2im_output_info.clone()->set_is_resizable(true).set_data_type(input->data_type()), output, start_end.first, start_end.second));
giuros0146a49a02019-04-01 13:50:22 +0100210 }
211 else if(padded_input)
212 {
213 const auto start_end = compute_start_end_slice_coordinates(col2im_output_info, deconv_info, is_nchw);
214 ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, &col2im_output_info, input, weights, deconv_info));
215 ARM_COMPUTE_RETURN_ON_ERROR(CLSlice::validate(&col2im_output_info, output, start_end.first, start_end.second));
216 }
217 else if(is_quantized)
218 {
219 ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, &col2im_output_info, input, weights, deconv_info));
Sheri Zhang0cdbda52020-02-25 15:57:21 +0000220 ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOutputStage::validate(&col2im_output_info, nullptr, output, output_stage_info));
giuros0146a49a02019-04-01 13:50:22 +0100221 }
222 else
223 {
224 ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, output, input, weights, deconv_info));
225 }
226
227 return Status{};
228}
229
// Convenience overload: forwards to the compile-context variant using the
// library's default compile context.
void CLGEMMDeconvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &deconv_info)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, weights, bias, output, deconv_info);
}
234
// Full configuration of the GEMM-based deconvolution pipeline:
// (optional NCHW->NHWC permute) -> weights reshape+transpose -> GEMM/GEMMLowp
// -> deconvolution output reshape -> (optional quantized output stage)
// -> (optional slice to crop padding).
// NOTE: statement order matters — memory-group manage/allocate calls track
// intermediate tensor lifetimes, so do not reorder them.
void CLGEMMDeconvolutionLayer::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *bias, ICLTensor *output,
                                         const PadStrideInfo &deconv_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLGEMMDeconvolutionLayer::validate(input->info(),
                                                                  weights->info(),
                                                                  bias != nullptr ? bias->info() : nullptr,
                                                                  output->info(),
                                                                  deconv_info));

    _original_weights = weights;
    _padded_input     = deconv_info.pad_bottom() > 0 || deconv_info.pad_left() > 0 || deconv_info.pad_right() > 0 || deconv_info.pad_top() > 0;
    _is_nchw          = input->info()->data_layout() == DataLayout::NCHW;
    _is_quantized     = is_data_type_quantized_asymmetric(input->info()->data_type());

    const ICLTensor *input_to_use   = input;
    const ICLTensor *weights_to_use = weights;

    // If the data layout is NCHW, transform everything in NHWC. Another alternative could be to
    // do an outer product in NCHW and then an accumulation through a reduction. This would have two
    // drawbacks: first, the outer product is less efficient than a full GEMM. Second, the reduction
    // might be slower than GEMM.
    if(_is_nchw)
    {
        _memory_group.manage(&_permuted_input);
        _permute_input_to_nhwc.configure(compile_context, input, &_permuted_input, PermutationVector(2U, 0U, 1U));

        _permute_weights_to_nhwc.configure(compile_context, weights, &_permuted_weights, PermutationVector(2U, 0U, 1U));

        input_to_use   = &_permuted_input;
        weights_to_use = &_permuted_weights;
    }

    // Reshape the input weights. The weights will be reshaped only once during the call to prepare()
    _reshaped_weights.allocator()->init(TensorInfo(TensorShape(weights_to_use->info()->dimension(0),
                                                               weights_to_use->info()->dimension(1) * weights_to_use->info()->dimension(2) * weights_to_use->info()->dimension(3)),
                                                   1,
                                                   input->info()->data_type(), weights->info()->quantization_info()));

    _reshape_weights.configure(compile_context, weights_to_use, &_reshaped_weights);
    _transpose_weights.configure(compile_context, &_reshaped_weights, &_reshaped_weights_t);

    const size_t idx_h = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT);
    GEMMInfo     gemm_info(false, false, true, input->info()->dimension(idx_h), true);

    // Configure output stage for asymmetric quantized types
    if(_is_quantized)
    {
        // gemmlowp adds the offsets (instead of subtracting them). Thus, we need to negate the original
        // and restore them back to make it work properly.
        QuantizationInfo iq_info = input->info()->quantization_info();
        QuantizationInfo wq_info = weights->info()->quantization_info();

        // Temporarily negate the offsets only for the duration of GEMMLowp configuration.
        input_to_use->info()->set_quantization_info(QuantizationInfo(iq_info.uniform().scale, -iq_info.uniform().offset));
        _reshaped_weights_t.info()->set_quantization_info(QuantizationInfo(wq_info.uniform().scale, -wq_info.uniform().offset));

        _mm_gemmlowp.configure(compile_context, input_to_use, &_reshaped_weights_t, nullptr, &_gemm_output, gemm_info);

        // Restore the original quantization info.
        input_to_use->info()->set_quantization_info(iq_info);
        _reshaped_weights_t.info()->set_quantization_info(wq_info);
    }
    else
    {
        _mm_gemm.configure(compile_context, input_to_use, &_reshaped_weights_t, nullptr, &_gemm_output, 1.f, 0.0f, gemm_info);
    }

    if(_is_nchw)
    {
        _permuted_input.allocator()->allocate();
    }

    // Select which tensors feed each tail stage depending on padding/quantization.
    ICLTensor *deconv_reshape_output = nullptr;
    ICLTensor *slice_output          = nullptr;
    ICLTensor *output_stage_output   = nullptr;

    if(_padded_input && _is_quantized)
    {
        _memory_group.manage(&_slice_gemm_input);
        _memory_group.manage(&_gemmlowp_final);
        deconv_reshape_output = &_gemmlowp_final;
        output_stage_output   = &_slice_gemm_input;
        slice_output          = output;
    }
    else if(_padded_input)
    {
        _memory_group.manage(&_slice_gemm_input);
        deconv_reshape_output = &_slice_gemm_input;
        slice_output          = output;
    }
    else if(_is_quantized)
    {
        _memory_group.manage(&_gemmlowp_final);
        deconv_reshape_output = &_gemmlowp_final;
        output_stage_output   = output;
    }
    else
    {
        deconv_reshape_output = output;
    }

    // Configure a Col2Im call to reshape the output of GEMM
    _deconv_reshape->configure(compile_context, &_gemm_output, bias, deconv_reshape_output, input->info(), weights->info(), deconv_info);
    _gemm_output.allocator()->allocate();

    if(_is_quantized)
    {
        GEMMLowpOutputStageInfo output_stage_info;
        construct_gemmlowp_output_stage(input->info(), weights->info(), output->info(), output_stage_info);
        _gemmlowp_output_stage.configure(compile_context, &_gemmlowp_final, nullptr, output_stage_output, output_stage_info);
        _gemmlowp_final.allocator()->allocate();
    }

    // If the input was padded, the output needs to be sliced.
    if(_padded_input)
    {
        const auto start_end = compute_start_end_slice_coordinates(*deconv_reshape_output->info(), deconv_info, _is_nchw);
        _slice_gemm.configure(compile_context, &_slice_gemm_input, slice_output, start_end.first, start_end.second);
        _slice_gemm_input.allocator()->allocate();
    }
}
355
356void CLGEMMDeconvolutionLayer::run()
357{
358 prepare();
359
360 MemoryGroupResourceScope scope_mg(_memory_group);
361
362 if(_is_nchw)
363 {
364 _permute_input_to_nhwc.run();
365 }
366
367 if(_is_quantized)
368 {
369 _mm_gemmlowp.run();
370 }
371 else
372 {
373 _mm_gemm.run();
374 }
375
Sang-Hoon Parkbef7fa22020-10-21 15:58:54 +0100376 CLScheduler::get().enqueue(*_deconv_reshape, false);
giuros0146a49a02019-04-01 13:50:22 +0100377
378 if(_is_quantized)
379 {
380 _gemmlowp_output_stage.run();
381 }
382
383 if(_padded_input)
384 {
385 _slice_gemm.run();
386 }
387}
388
// One-time preparation: permute (if NCHW), reshape and transpose the weights,
// then prepare the underlying GEMM. Intermediate weight buffers are freed as
// soon as the next stage has consumed them, so the allocate/run/free order here
// is deliberate.
void CLGEMMDeconvolutionLayer::prepare()
{
    if(!_is_prepared)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

        // Permute the weights into NHWC first if the layer was configured for NCHW.
        if(_is_nchw)
        {
            _permuted_weights.allocator()->allocate();
            _permute_weights_to_nhwc.run();
        }

        _reshaped_weights.allocator()->allocate();
        _reshape_weights.run();

        // The permuted copy is no longer needed once the reshape has consumed it.
        if(_is_nchw)
        {
            _permuted_weights.allocator()->free();
        }

        _reshaped_weights_t.allocator()->allocate();
        _transpose_weights.run();

        // Prepare gemm
        if(!_is_quantized)
        {
            _mm_gemm.prepare();
        }
        else
        {
            _mm_gemmlowp.prepare();
        }

        // Free resources
        if(!_reshaped_weights_t.is_used())
        {
            _reshaped_weights_t.allocator()->free();
        }

        _original_weights->mark_as_unused();
        _is_prepared = true;
    }
}
432} // namespace arm_compute