blob: 32988582154067d6dd56905e3c50686fd62f1b50 [file] [log] [blame]
giuros0146a49a02019-04-01 13:50:22 +01001/*
Giuseppe Rossini0a958cb2020-01-16 16:38:56 +00002 * Copyright (c) 2019-2020 ARM Limited.
giuros0146a49a02019-04-01 13:50:22 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#include "arm_compute/runtime/CL/functions/CLGEMMDeconvolutionLayer.h"
25
26#include "arm_compute/core/Helpers.h"
27#include "arm_compute/core/Validate.h"
28#include "arm_compute/core/utils/misc/ShapeCalculator.h"
29#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
30#include "arm_compute/runtime/CL/CLScheduler.h"
31#include "utils/TypePrinter.h"
32
33#include <memory>
34#include <tuple>
35
36namespace arm_compute
37{
38namespace
39{
40std::pair<Coordinates, Coordinates> compute_start_end_slice_coordinates(const ITensorInfo &output_info, const PadStrideInfo &deconv_info, bool is_nchw)
41{
42 Coordinates start;
43 Coordinates end;
44
45 if(is_nchw)
46 {
47 start.set(0, deconv_info.pad_left());
48 start.set(1, deconv_info.pad_top());
49 end.set(0, output_info.dimension(0) - deconv_info.pad_right());
50 end.set(1, output_info.dimension(1) - deconv_info.pad_bottom());
51 }
52 else
53 {
54 start.set(0, 0);
55 start.set(1, deconv_info.pad_left());
56 start.set(2, deconv_info.pad_top());
57
58 end.set(0, output_info.dimension(0));
59 end.set(1, output_info.dimension(1) - deconv_info.pad_right());
60 end.set(2, output_info.dimension(2) - deconv_info.pad_bottom());
61 }
62
63 return { start, end };
64}
Sheri Zhang0cdbda52020-02-25 15:57:21 +000065Status construct_gemmlowp_output_stage(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, GEMMLowpOutputStageInfo &output_stage_info)
66{
67 const auto data_type = input->data_type();
68
69 if(is_data_type_quantized_asymmetric(data_type))
70 {
71 const UniformQuantizationInfo iq_info = input->quantization_info().uniform();
72 const UniformQuantizationInfo wq_info = weights->quantization_info().uniform();
73 const UniformQuantizationInfo oq_info = output->quantization_info().uniform();
74
75 float multiplier = iq_info.scale * wq_info.scale / oq_info.scale;
76 int output_multiplier(0);
77 int output_shift(0);
78 ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));
79
80 output_stage_info.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
81 output_stage_info.gemmlowp_multiplier = output_multiplier;
82 output_stage_info.gemmlowp_shift = output_shift;
83 output_stage_info.gemmlowp_offset = oq_info.offset;
84 const auto min_max_bound = get_min_max(data_type);
85 output_stage_info.gemmlowp_min_bound = (std::get<0>(min_max_bound)).get<int32_t>();
86 output_stage_info.gemmlowp_max_bound = (std::get<1>(min_max_bound)).get<int32_t>();
87 output_stage_info.output_data_type = data_type;
88 }
89 return Status{};
90}
91
giuros0146a49a02019-04-01 13:50:22 +010092} // namespace
93
/** Default constructor.
 *
 * Initializes all sub-functions and intermediate tensors in their default,
 * unconfigured state; the actual setup happens in configure(). The state
 * flags default to the not-prepared / unpadded / NHWC / non-quantized path.
 *
 * @param[in] memory_manager (Optional) Memory manager used by the internal
 *                           memory group to share temporary-tensor memory.
 */
CLGEMMDeconvolutionLayer::CLGEMMDeconvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager) // NOLINT
    : _memory_group(std::move(memory_manager)),
      _mm_gemm(),
      _mm_gemmlowp(),
      _gemmlowp_output_stage(),
      _permute_input_to_nhwc(),
      _permute_weights_to_nhwc(),
      _reshape_weights(),
      _transpose_weights(),
      _deconv_reshape(),
      _slice_gemm(),
      _gemmlowp_final(),
      _reshaped_weights(),
      _reshaped_weights_t(),
      _permuted_input(),
      _permuted_weights(),
      _gemm_output(),
      _slice_gemm_input(),
      _original_weights(),
      _is_prepared(false),
      _padded_input(false),
      _is_nchw(false),
      _is_quantized(false)
{
}
119
120Status CLGEMMDeconvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *output, const PadStrideInfo &deconv_info)
121{
122 ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
Sheri Zhang0cdbda52020-02-25 15:57:21 +0000123 ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
giuros0146a49a02019-04-01 13:50:22 +0100124 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
125 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
126
127 DataLayout data_layout = input->data_layout();
128 const bool padded_input = deconv_info.pad_bottom() > 0 || deconv_info.pad_left() > 0 || deconv_info.pad_right() > 0 || deconv_info.pad_top() > 0;
129 const bool is_nchw = input->data_layout() == DataLayout::NCHW;
130 const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());
131
132 const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
133 const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
134 const size_t idx_b = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
135
136 ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_w) != deconv_info.stride().first);
137 ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_h) != deconv_info.stride().second);
138
139 TensorShape nhwc_weights_shape = weights->tensor_shape();
140 TensorShape nhwc_input_shape = input->tensor_shape();
141
142 if(is_nchw)
143 {
144 permute(nhwc_weights_shape, PermutationVector(2, 0, 1));
145 permute(nhwc_input_shape, PermutationVector(2, 0, 1));
146
147 TensorInfo nhwc_input_info = input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(nhwc_input_shape).set_data_layout(DataLayout::NCHW);
148
149 TensorInfo nhwc_weights_info = weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(nhwc_weights_shape).set_data_layout(DataLayout::NCHW);
150
151 CLPermute::validate(weights, &nhwc_weights_info, PermutationVector(2, 0, 1));
152 CLPermute::validate(input, &nhwc_input_info, PermutationVector(2, 0, 1));
153 }
154
155 const TensorShape reshaped_shape = TensorShape(nhwc_weights_shape[0], nhwc_weights_shape[1] * nhwc_weights_shape[2] * nhwc_weights_shape[3]);
156 const TensorInfo reshaped_info = weights->clone()->set_tensor_shape(reshaped_shape).set_data_layout(DataLayout::NCHW).set_is_resizable(true);
157 ARM_COMPUTE_RETURN_ON_ERROR(CLReshapeLayer::validate(weights, &reshaped_info));
158
159 TensorShape transposed_shape(reshaped_shape[1], reshaped_shape[0]);
160 const TensorInfo reshaped_t_info = reshaped_info.clone()->set_is_resizable(true).set_tensor_shape(transposed_shape);
161 ARM_COMPUTE_RETURN_ON_ERROR(CLTranspose::validate(&reshaped_info, &reshaped_t_info));
162
163 TensorShape gemm_output_shape(weights->dimension(idx_w) * weights->dimension(idx_h) * weights->dimension(idx_b),
164 input->dimension(idx_w),
165 input->dimension(idx_h),
166 input->dimension(idx_b));
167
168 TensorInfo gemm_output_info = reshaped_t_info.clone()->set_tensor_shape(gemm_output_shape).set_is_resizable(true);
169 GEMMInfo gemm_info(false, false, true, input->dimension(idx_h), true);
170
Sheri Zhang0cdbda52020-02-25 15:57:21 +0000171 GEMMLowpOutputStageInfo output_stage_info;
172
giuros0146a49a02019-04-01 13:50:22 +0100173 if(is_quantized)
174 {
175 ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyCore::validate(&input->clone()->set_tensor_shape(nhwc_input_shape), &reshaped_t_info, nullptr, &gemm_output_info.set_data_type(DataType::S32),
176 gemm_info));
Sheri Zhang0cdbda52020-02-25 15:57:21 +0000177 ARM_COMPUTE_RETURN_ON_ERROR(construct_gemmlowp_output_stage(input, weights, output, output_stage_info));
178
giuros0146a49a02019-04-01 13:50:22 +0100179 }
180 else
181 {
182 ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(&input->clone()->set_tensor_shape(nhwc_input_shape).set_is_resizable(true), &reshaped_t_info, nullptr, &gemm_output_info, 1.0f, 0.0f, gemm_info));
183 }
184
Matthew Jacksonb9070a42019-08-22 16:13:27 +0100185 const PadStrideInfo stride_info(deconv_info.stride().first, deconv_info.stride().second);
Michele Di Giorgio14cbfb22019-10-23 10:53:10 +0100186 auto out_dims = deconvolution_output_dimensions(input->dimension(idx_w), input->dimension(idx_h), weights->dimension(idx_w), weights->dimension(idx_h), stride_info);
187 const TensorShape deconv_shape = misc::shape_calculator::compute_deconvolution_output_shape(out_dims, *input, *weights);
188 TensorInfo col2im_output_info = gemm_output_info.clone()->set_tensor_shape(deconv_shape).set_is_resizable(true);
giuros0146a49a02019-04-01 13:50:22 +0100189
190 if(padded_input && is_quantized)
191 {
192 const auto start_end = compute_start_end_slice_coordinates(col2im_output_info, deconv_info, is_nchw);
193 ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, &col2im_output_info, input, weights, deconv_info));
Sheri Zhang0cdbda52020-02-25 15:57:21 +0000194 ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOutputStage::validate(&col2im_output_info, nullptr, &col2im_output_info.clone()->set_is_resizable(true).set_data_type(input->data_type()), output_stage_info));
195 ARM_COMPUTE_RETURN_ON_ERROR(CLSlice::validate(&col2im_output_info.clone()->set_is_resizable(true).set_data_type(input->data_type()), output, start_end.first, start_end.second));
giuros0146a49a02019-04-01 13:50:22 +0100196 }
197 else if(padded_input)
198 {
199 const auto start_end = compute_start_end_slice_coordinates(col2im_output_info, deconv_info, is_nchw);
200 ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, &col2im_output_info, input, weights, deconv_info));
201 ARM_COMPUTE_RETURN_ON_ERROR(CLSlice::validate(&col2im_output_info, output, start_end.first, start_end.second));
202 }
203 else if(is_quantized)
204 {
205 ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, &col2im_output_info, input, weights, deconv_info));
Sheri Zhang0cdbda52020-02-25 15:57:21 +0000206 ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOutputStage::validate(&col2im_output_info, nullptr, output, output_stage_info));
giuros0146a49a02019-04-01 13:50:22 +0100207 }
208 else
209 {
210 ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, output, input, weights, deconv_info));
211 }
212
213 return Status{};
214}
215
/** Configure the GEMM-based deconvolution pipeline.
 *
 * Pipeline: (optional NCHW->NHWC permute) -> weights reshape+transpose ->
 * GEMM / GEMMLowp -> output reshape -> (optional requantization output stage)
 * -> (optional slice to strip the deconvolution padding).
 *
 * @param[in]  input       Input tensor (F32/F16/QASYMM8/QASYMM8_SIGNED).
 * @param[in]  weights     Weights tensor; same data type/layout as @p input.
 * @param[in]  bias        (Optional) Bias tensor; may be nullptr.
 * @param[out] output      Output tensor.
 * @param[in]  deconv_info Padding and stride information.
 */
void CLGEMMDeconvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &deconv_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLGEMMDeconvolutionLayer::validate(input->info(),
                                                                  weights->info(),
                                                                  bias != nullptr ? bias->info() : nullptr,
                                                                  output->info(),
                                                                  deconv_info));

    _original_weights = weights;
    _padded_input     = deconv_info.pad_bottom() > 0 || deconv_info.pad_left() > 0 || deconv_info.pad_right() > 0 || deconv_info.pad_top() > 0;
    _is_nchw          = input->info()->data_layout() == DataLayout::NCHW;
    _is_quantized     = is_data_type_quantized_asymmetric(input->info()->data_type());

    const ICLTensor *input_to_use   = input;
    const ICLTensor *weights_to_use = weights;

    // If the data layout is NCHW, transform everything in NHWC. Another alternative could be to
    // do an outer product in NCHW and then an accumulation through a reduction. This would have two
    // drawbacks: first, the outer product is less efficient than a full GEMM. Second, the reduction
    // might be slower than GEMM.
    if(_is_nchw)
    {
        _memory_group.manage(&_permuted_input);
        _permute_input_to_nhwc.configure(input, &_permuted_input, PermutationVector(2U, 0U, 1U));

        _permute_weights_to_nhwc.configure(weights, &_permuted_weights, PermutationVector(2U, 0U, 1U));

        input_to_use   = &_permuted_input;
        weights_to_use = &_permuted_weights;
    }

    // Reshape the input weights. The weights will be reshaped only once during the call to prepare()
    // Weights are flattened to a 2D matrix [C, W*H*B] and then transposed for the GEMM.
    _reshaped_weights.allocator()->init(TensorInfo(TensorShape(weights_to_use->info()->dimension(0),
                                                               weights_to_use->info()->dimension(1) * weights_to_use->info()->dimension(2) * weights_to_use->info()->dimension(3)),
                                                   1,
                                                   input->info()->data_type(), weights->info()->quantization_info()));

    _reshape_weights.configure(weights_to_use, &_reshaped_weights);
    _transpose_weights.configure(&_reshaped_weights, &_reshaped_weights_t);

    const size_t idx_h = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT);
    GEMMInfo     gemm_info(false, false, true, input->info()->dimension(idx_h), true);

    // Configure output stage for asymmetric quantized types
    if(_is_quantized)
    {
        // gemmlowp adds the offsets (instead of subtracting them). Thus, we need to negate the original
        // and restore them back to make it work properly.
        // NOTE: the negated quantization info must only be visible to _mm_gemmlowp.configure(),
        // hence the set/restore dance around that single call.
        QuantizationInfo iq_info = input->info()->quantization_info();
        QuantizationInfo wq_info = weights->info()->quantization_info();

        input_to_use->info()->set_quantization_info(QuantizationInfo(iq_info.uniform().scale, -iq_info.uniform().offset));
        _reshaped_weights_t.info()->set_quantization_info(QuantizationInfo(wq_info.uniform().scale, -wq_info.uniform().offset));

        _mm_gemmlowp.configure(input_to_use, &_reshaped_weights_t, nullptr, &_gemm_output, gemm_info);

        input_to_use->info()->set_quantization_info(iq_info);
        _reshaped_weights_t.info()->set_quantization_info(wq_info);
    }
    else
    {
        _mm_gemm.configure(input_to_use, &_reshaped_weights_t, nullptr, &_gemm_output, 1.f, 0.0f, gemm_info);
    }

    if(_is_nchw)
    {
        _permuted_input.allocator()->allocate();
    }

    // Wire up the tail of the pipeline. Depending on padding/quantization, the
    // reshape output either goes to the final output directly, to the output
    // stage, or to an intermediate tensor that is subsequently sliced.
    ICLTensor *deconv_reshape_output = nullptr;
    ICLTensor *slice_output          = nullptr;
    ICLTensor *output_stage_output   = nullptr;

    if(_padded_input && _is_quantized)
    {
        _memory_group.manage(&_slice_gemm_input);
        _memory_group.manage(&_gemmlowp_final);
        deconv_reshape_output = &_gemmlowp_final;
        output_stage_output   = &_slice_gemm_input;
        slice_output          = output;
    }
    else if(_padded_input)
    {
        _memory_group.manage(&_slice_gemm_input);
        deconv_reshape_output = &_slice_gemm_input;
        slice_output          = output;
    }
    else if(_is_quantized)
    {
        _memory_group.manage(&_gemmlowp_final);
        deconv_reshape_output = &_gemmlowp_final;
        output_stage_output   = output;
    }
    else
    {
        deconv_reshape_output = output;
    }

    // Configure a Col2Im call to reshape the output of GEMM
    _deconv_reshape.configure(&_gemm_output, bias, deconv_reshape_output, input->info(), weights->info(), deconv_info);
    _gemm_output.allocator()->allocate();

    if(_is_quantized)
    {
        GEMMLowpOutputStageInfo output_stage_info;
        construct_gemmlowp_output_stage(input->info(), weights->info(), output->info(), output_stage_info);
        _gemmlowp_output_stage.configure(&_gemmlowp_final, nullptr, output_stage_output, output_stage_info);
        _gemmlowp_final.allocator()->allocate();
    }

    // If the input was padded, the output needs to be sliced.
    if(_padded_input)
    {
        const auto start_end = compute_start_end_slice_coordinates(*deconv_reshape_output->info(), deconv_info, _is_nchw);
        _slice_gemm.configure(&_slice_gemm_input, slice_output, start_end.first, start_end.second);
        _slice_gemm_input.allocator()->allocate();
    }
}
335
/** Execute one forward pass of the deconvolution.
 *
 * Order matters: (optional) NCHW->NHWC input permute, then the GEMM or
 * GEMMLowp matrix multiply, then the output reshape, then (for quantized
 * types) the requantization output stage and (for padded inputs) the final
 * slice. prepare() is invoked first so the one-off weight transformations
 * are guaranteed to have run.
 */
void CLGEMMDeconvolutionLayer::run()
{
    prepare();

    // Acquire the memory managed by the group for the duration of this run.
    MemoryGroupResourceScope scope_mg(_memory_group);

    if(_is_nchw)
    {
        _permute_input_to_nhwc.run();
    }

    if(_is_quantized)
    {
        _mm_gemmlowp.run();
    }
    else
    {
        _mm_gemm.run();
    }

    CLScheduler::get().enqueue(_deconv_reshape, false);

    if(_is_quantized)
    {
        _gemmlowp_output_stage.run();
    }

    if(_padded_input)
    {
        _slice_gemm.run();
    }
}
368
/** One-off preparation of the transformed weights.
 *
 * Runs the (optional) weights permute, the reshape and the transpose exactly
 * once, prepares the underlying GEMM/GEMMLowp function, then frees every
 * intermediate that is no longer needed and marks the original weights as
 * unused so their memory can be reclaimed.
 */
void CLGEMMDeconvolutionLayer::prepare()
{
    if(!_is_prepared)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

        if(_is_nchw)
        {
            _permuted_weights.allocator()->allocate();
            _permute_weights_to_nhwc.run();
        }

        _reshaped_weights.allocator()->allocate();
        _reshape_weights.run();

        // The permuted weights are only an input to the reshape; free them as
        // soon as the reshape has consumed them.
        if(_is_nchw)
        {
            _permuted_weights.allocator()->free();
        }

        _reshaped_weights_t.allocator()->allocate();
        _transpose_weights.run();

        // Prepare gemm
        if(!_is_quantized)
        {
            _mm_gemm.prepare();
        }
        else
        {
            _mm_gemmlowp.prepare();
        }

        // Free resources
        if(!_reshaped_weights_t.is_used())
        {
            _reshaped_weights_t.allocator()->free();
        }

        _original_weights->mark_as_unused();
        _is_prepared = true;
    }
}
412} // namespace arm_compute