/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"

#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"

#include <cmath>
#include <memory>
#include <tuple>

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;

CLConvolutionLayerReshapeWeights::CLConvolutionLayerReshapeWeights()
    : _weights_reshape_kernel()
{
}

void CLConvolutionLayerReshapeWeights::configure(const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups)
{
    // Perform validation step
    ARM_COMPUTE_ERROR_ON_NULLPTR(weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLConvolutionLayerReshapeWeights::validate(weights->info(),
                                                                          (biases != nullptr) ? biases->info() : nullptr,
                                                                          output->info(),
                                                                          num_groups));

    const bool       append_biases = (biases != nullptr) && !is_data_type_quantized_asymmetric(weights->info()->data_type());
    const ICLTensor *biases_to_use = (append_biases) ? biases : nullptr;

    _weights_reshape_kernel.configure(weights, biases_to_use, output, num_groups);

    output->info()->set_quantization_info(weights->info()->quantization_info());
}

Status CLConvolutionLayerReshapeWeights::validate(const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, unsigned int num_groups)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(weights);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    if(biases != nullptr)
    {
        const int idx_kernels = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::BATCHES);
        ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_asymmetric(weights->data_type()));
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    if((output != nullptr) && (output->total_size() != 0))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, output);

        ARM_COMPUTE_RETURN_ON_ERROR(CLWeightsReshapeKernel::validate(weights, biases, output, num_groups));
    }

    return Status{};
}

void CLConvolutionLayerReshapeWeights::run()
{
    CLScheduler::get().enqueue(_weights_reshape_kernel);
}
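
// Like the other runtime functions in the library, this class follows the
// configure()/validate()/run() pattern: the static validate() mirrors the checks
// performed by configure(), so a caller can pre-flight a configuration without
// allocating any OpenCL resources. A minimal sketch (illustrative only;
// weights_info and reshaped_info are hypothetical ITensorInfo objects):
//
//   const Status s = CLConvolutionLayerReshapeWeights::validate(&weights_info, nullptr, &reshaped_info);
//   if(s.error_code() != ErrorCode::OK)
//   {
//       // Fall back to a different convolution method
//   }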

CLGEMMConvolutionLayer::CLGEMMConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(memory_manager), _reshape_weights(), _im2col_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(), _col2im_kernel(), _activationlayer_function(),
      _add_bias_kernel(), _original_weights(nullptr), _im2col_output(), _weights_reshaped(), _gemm_output(), _tmp_output(), _data_layout(DataLayout::NCHW), _append_bias(false), _skip_im2col(false),
      _skip_col2im(false), _is_quantized(false), _is_activationlayer_enabled(false), _is_prepared(false)
{
}

void CLGEMMConvolutionLayer::configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, int gemm_3d_depth)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
    ARM_COMPUTE_ERROR_THROW_ON(validate_mm(input->info(), weights->info(), output->info(), gemm_3d_depth, _skip_im2col));

    const GEMMInfo &gemm_info = GEMMInfo(false, false, true /* Reshape weights only for the first run */,
                                         gemm_3d_depth, _skip_im2col /* Reinterpret the input as 3D if im2col is skipped */);

    if(_is_quantized)
    {
        // Computing the convolution requires negated offsets, so temporarily swap in
        // QuantizationInfo objects with the input and weights offsets negated
        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

        _mm_gemmlowp.configure(input, weights, output, gemm_info);

        // Restore the original QuantizationInfo, as input and weights could be used in other convolution layers
        input->info()->set_quantization_info(input_quantization_info);
        weights->info()->set_quantization_info(weights_quantization_info);
    }
    else
    {
        // Configure matrix multiply function
        _mm_gemm.configure(input, weights, nullptr, output, 1.0f, 0.0f, gemm_info);
    }
}
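
// Why the offsets are negated above (a derivation, not library documentation):
// under the asymmetric scheme a quantized value q represents the real value
// r = scale * (q - offset), so the convolution needs the integer core to
// accumulate sum((a_q - a_offset) * (b_q - b_offset)). Passing QuantizationInfo
// with the offsets negated is how that sign reaches CLGEMMLowpMatrixMultiplyCore.
// For example (hypothetical value), an input with offset 128 is handed to the
// core as offset -128, so 128 is subtracted from, not added to, each value.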

Status CLGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, int gemm_3d_depth, bool skip_im2col)
{
    const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());

    const GEMMInfo &gemm_info = GEMMInfo(false, false, true /* Reshape weights only for the first run */,
                                         gemm_3d_depth, skip_im2col /* Reinterpret the input as 3D if im2col is skipped */);

    if(is_quantized)
    {
        // Computing the convolution requires negated offsets, so validate against
        // cloned tensor infos with the input and weights offsets negated
        const QuantizationInfo input_quantization_info   = input->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->quantization_info();

        std::unique_ptr<ITensorInfo> input_qa   = input->clone();
        std::unique_ptr<ITensorInfo> weights_qa = weights->clone();
        input_qa->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
        weights_qa->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

        // Perform validation step on GEMMLowp
        return CLGEMMLowpMatrixMultiplyCore::validate(input_qa.get(), weights_qa.get(), output, gemm_info);
    }
    else
    {
        // Perform validation step on matrix multiply function
        return CLGEMM::validate(input, weights, nullptr, output, 1.0f, 0.0f, gemm_info);
    }
}

void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
                                       const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

    ARM_COMPUTE_ERROR_THROW_ON(CLGEMMConvolutionLayer::validate(input->info(),
                                                                weights->info(),
                                                                biases != nullptr ? biases->info() : nullptr,
                                                                output->info(),
                                                                conv_info,
                                                                weights_info,
                                                                dilation,
                                                                act_info,
                                                                num_groups));

    const DataType   data_type   = input->info()->data_type();
    const DataLayout data_layout = input->info()->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->info()->dimension(idx_width);
    const unsigned int kernel_height = weights->info()->dimension(idx_height);

    _is_prepared      = weights_info.retain_internal_weights();
    _original_weights = weights;
    _is_quantized     = is_data_type_quantized_asymmetric(input->info()->data_type());
    _data_layout      = data_layout;
    _skip_im2col      = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
    _skip_col2im      = data_layout == DataLayout::NHWC;
    _append_bias      = (biases != nullptr) && (!_is_quantized);
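
    // Why these shortcuts hold (reasoning, not library documentation): in NHWC the
    // channel dimension is innermost, so a 1x1 kernel at unit stride already reads
    // the input as a GEMM-ready matrix and im2col would be a pure copy; likewise the
    // NHWC GEMM result is already laid out channel-first per pixel, matching the
    // final output tensor, so col2im can be skipped and the result used in place.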

    // Set the GPU target for im2col and col2im
    _im2col_kernel.set_target(CLScheduler::get().target());
    _col2im_kernel.set_target(CLScheduler::get().target());

    const ICLTensor *gemm_input_to_use         = input;
    ICLTensor       *gemm_output_to_use        = output;
    ICLTensor       *gemm_output_staged_to_use = output;

    const ICLTensor *biases_to_use = (_append_bias && !_skip_im2col) ? biases : nullptr;

    // Get parameters from conv_info
    unsigned int stride_x = 0;
    unsigned int stride_y = 0;
    std::tie(stride_x, stride_y) = conv_info.stride();

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;
    std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(idx_width),
                                                 input->info()->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);

    unsigned int mat_weights_cols = weights->info()->dimension(idx_kernels) / num_groups;

    // _weights_reshaped will be auto-configured in the kernel.
    // Just append biases and do not transpose 1xW as it will be reshaped in CLGEMM
    _reshape_weights.configure(weights, biases_to_use, &_weights_reshaped, num_groups);

    // Create tensor to store im2col reshaped inputs
    if(!_skip_im2col)
    {
        _memory_group.manage(&_im2col_output);

        // Configure and tune im2col. im2col output shape is auto-initialized
        _im2col_kernel.configure(input, &_im2col_output, Size2D(kernel_width, kernel_height), conv_info, _append_bias, dilation, num_groups);

        // Set quantization info
        _im2col_output.info()->set_quantization_info(input->info()->quantization_info());
        CLScheduler::get().tune_kernel_static(_im2col_kernel);

        // Update GEMM input
        gemm_input_to_use = &_im2col_output;
    }
    else if(_append_bias)
    {
        // Configure add bias kernel
        _add_bias_kernel.configure(output, biases, output, ConvertPolicy::SATURATE);
    }

    // Create GEMM output tensor
    if(!_skip_col2im || _is_quantized)
    {
        TensorShape shape_gemm;
        if(_skip_col2im)
        {
            shape_gemm = input->info()->tensor_shape();
            shape_gemm.set(idx_width, conv_w);
            shape_gemm.set(idx_height, conv_h);
            shape_gemm.set(idx_channel, mat_weights_cols);
        }
        else
        {
            shape_gemm = _im2col_output.info()->tensor_shape();
            shape_gemm.set(0, mat_weights_cols);
            shape_gemm.set(1, conv_w * conv_h);
        }
        // For quantized asymmetric input the GEMM output stays S32, keeping the raw integer accumulators for the separate output stage
        const DataType gemm_data_type = _is_quantized ? DataType::S32 : data_type;
        // FIXME: input->clone() doesn't work with subtensors for grouped convolutions.
        TensorInfo info_gemm(shape_gemm, 1, gemm_data_type);
        info_gemm.set_quantization_info(output->info()->quantization_info()).set_data_layout(input->info()->data_layout());
        _gemm_output.allocator()->init(info_gemm);
        _memory_group.manage(&_gemm_output);

        // Update GEMM output
        gemm_output_to_use = &_gemm_output;
    }
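
    // Worked example of the shapes above (hypothetical numbers; NCHW, num_groups = 1):
    // a 32x32x16 input convolved with 64 kernels of size 3x3 at stride 1 and pad 1
    // gives conv_w = conv_h = 32. im2col then emits a [3*3*16 = 144 (145 with an
    // appended bias), 32*32 = 1024] matrix, the reshaped weights form its [64, 144]
    // counterpart, and the GEMM output is [64, 1024], which col2im folds back into
    // a 32x32x64 tensor.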

    // Configure and tune GEMM
    configure_mm(gemm_input_to_use, &_weights_reshaped, gemm_output_to_use, (data_layout == DataLayout::NHWC) ? conv_h : 1);

    if(!_skip_im2col)
    {
        _im2col_output.allocator()->allocate();
    }

    // Configure output stage for quantized case
    if(_is_quantized)
    {
        const QuantizationInfo output_quant_info = (output->info()->total_size() == 0) ? input->info()->quantization_info() : output->info()->quantization_info();

        if(!_skip_col2im)
        {
            _memory_group.manage(&_tmp_output);
            gemm_output_staged_to_use = &_tmp_output;
        }

        float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output_quant_info.scale;
        _gemmlowp_output_stage.configure(gemm_output_to_use, biases, gemm_output_staged_to_use, multiplier, output_quant_info.offset);
    }
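
    // The requantization factor above is multiplier = (input_scale * weights_scale) / output_scale,
    // which maps the S32 accumulators back onto the QASYMM8 output grid. For example
    // (hypothetical scales), input_scale = 0.5, weights_scale = 0.25 and
    // output_scale = 1.0 give multiplier = 0.125, i.e. each accumulator is scaled
    // down by a factor of 8, shifted by the output offset and saturated to 8 bits.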

    if(!_skip_col2im)
    {
        // Configure and tune Col2Im
        _col2im_kernel.configure(_is_quantized ? gemm_output_staged_to_use : gemm_output_to_use, output, Size2D(conv_w, conv_h), num_groups);
        CLScheduler::get().tune_kernel_static(_col2im_kernel);

        _tmp_output.allocator()->allocate();
    }

    if(!_skip_col2im || _is_quantized)
    {
        _gemm_output.allocator()->allocate();
    }

    ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(idx_width) != conv_w) || (output->info()->dimension(idx_height) != conv_h),
                             "Output shape does not match the expected one");

    // Configure Activation Layer
    _is_activationlayer_enabled = act_info.enabled();

    if(_is_activationlayer_enabled)
    {
        _activationlayer_function.configure(output, nullptr, act_info);
    }

    ARM_COMPUTE_UNUSED(weights_info);
}

Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                        const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!");
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1) && (input->data_layout() != DataLayout::NCHW), "Grouping (num_groups != 1) with NHWC data layout is not supported");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1) && (input->data_type() == DataType::QASYMM8), "Grouping (num_groups != 1) is not supported with QASYMM8");
    ARM_COMPUTE_RETURN_ERROR_ON(((input->dimension(2) / weights->dimension(2)) != num_groups) && (input->data_layout() == DataLayout::NCHW));

    const DataLayout data_layout = input->data_layout();
    const DataType   data_type   = input->data_type();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->dimension(idx_width);
    const unsigned int kernel_height = weights->dimension(idx_height);

    TensorInfo         im2col_reshaped_info, info_gemm, tmp_info, weights_reshaped_info;
    const ITensorInfo *gemm_input_to_use         = input;
    const ITensorInfo *gemm_output_to_use        = output;
    const ITensorInfo *gemm_output_staged_to_use = output;
    const ITensorInfo *weights_to_use            = weights;

    const bool is_quantized = is_data_type_quantized_asymmetric(data_type);
    const bool append_bias  = (biases != nullptr) && (!is_quantized);
    const bool skip_im2col  = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
    const bool skip_col2im  = data_layout == DataLayout::NHWC;

    ARM_COMPUTE_RETURN_ERROR_ON((weights->dimension(idx_channel) * num_groups) != input->dimension(idx_channel));
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    // Validate biases
    if(biases != nullptr)
    {
        if(is_quantized)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        }
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ERROR_ON(act_info.b() > act_info.a());
    }

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;

    std::tie(conv_w, conv_h) = scaled_dimensions(input->dimension(idx_width),
                                                 input->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);

    unsigned int mat_weights_cols = weights->dimension(idx_kernels) / num_groups;

    // Validate weights reshaping and initialize the reshaped-weights tensor info
    ARM_COMPUTE_RETURN_ON_ERROR(CLConvolutionLayerReshapeWeights::validate(weights, is_quantized ? nullptr : biases, nullptr, num_groups));
    weights_reshaped_info = TensorInfo(compute_weights_reshaped_shape(*weights, (append_bias && !skip_im2col), num_groups), 1, data_type);
    weights_to_use        = &weights_reshaped_info;

    if(!skip_im2col)
    {
        const Size2D kernel_dims(kernel_width, kernel_height);

        // Output tensor auto initialization if not yet initialized
        TensorShape expected_output_shape = compute_im2col_conv_shape(input, kernel_dims, conv_info, append_bias, dilation, num_groups == 1, num_groups);

        auto_init_if_empty(im2col_reshaped_info, input->clone()->set_tensor_shape(expected_output_shape));

        ARM_COMPUTE_RETURN_ON_ERROR(CLIm2ColKernel::validate(input, &im2col_reshaped_info, kernel_dims, conv_info, append_bias, dilation, num_groups));
        gemm_input_to_use = &im2col_reshaped_info;
    }
    else if(append_bias)
    {
        // Validate add bias kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticAdditionKernel::validate(output, biases, output, ConvertPolicy::SATURATE));
    }

    // Create GEMM output tensor
    if(!skip_col2im || is_quantized)
    {
        const DataType gemm_data_type = is_quantized ? DataType::S32 : data_type;
        TensorShape    shape_gemm;
        if(skip_col2im)
        {
            shape_gemm = input->tensor_shape();
            shape_gemm.set(idx_width, conv_w);
            shape_gemm.set(idx_height, conv_h);
            shape_gemm.set(idx_channel, mat_weights_cols);
        }
        else
        {
            shape_gemm = gemm_input_to_use->tensor_shape();
            shape_gemm.set(0, mat_weights_cols);
            shape_gemm.set(1, conv_w * conv_h);
        }
        // For quantized asymmetric input the GEMM output stays S32, keeping the raw integer accumulators for the separate output stage
        info_gemm = TensorInfo(shape_gemm, 1, gemm_data_type);
        info_gemm.set_quantization_info(output->quantization_info()).set_data_layout(input->data_layout());
        gemm_output_to_use = &info_gemm;
    }

    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemm_input_to_use, weights_to_use, gemm_output_to_use, skip_col2im ? conv_h : 1, skip_im2col));

    if(is_quantized)
    {
        if(!skip_col2im)
        {
            tmp_info = TensorInfo(gemm_output_to_use->tensor_shape(), 1, DataType::QASYMM8);
            tmp_info.set_quantization_info(output->quantization_info());
            gemm_output_staged_to_use = &tmp_info;
        }
        // Validate output stage for quantized case
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(gemm_output_to_use, biases, gemm_output_staged_to_use));
    }

    // Validate Col2Im
    if(!skip_col2im)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLCol2ImKernel::validate(is_quantized ? gemm_output_staged_to_use : gemm_output_to_use, output,
                                                             Size2D(conv_w, conv_h), num_groups));
    }

    // Validate Activation Layer
    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayer::validate(output, nullptr, act_info));
    }

    return Status{};
}

void CLGEMMConvolutionLayer::run()
{
    prepare();

    _memory_group.acquire();

    // Run im2col
    if(!_skip_im2col)
    {
        CLScheduler::get().enqueue(_im2col_kernel);
    }

    // Run either the CLGEMM or the CLGEMMLowpMatrixMultiplyCore function
    if(_is_quantized)
    {
        // Run gemmlowp
        _mm_gemmlowp.run();

        // Run output stage
        _gemmlowp_output_stage.run();
    }
    else
    {
        // Run gemm
        _mm_gemm.run();
    }

    if(_skip_im2col && _append_bias)
    {
        CLScheduler::get().enqueue(_add_bias_kernel);
    }

    // Reshape output matrix
    if(!_skip_col2im)
    {
        CLScheduler::get().enqueue(_col2im_kernel, false);
    }

    // Run Activation Layer if enabled
    if(_is_activationlayer_enabled)
    {
        _activationlayer_function.run();
    }

    _memory_group.release();
}

void CLGEMMConvolutionLayer::prepare()
{
    if(!_is_prepared)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

        // Run weights reshaping and mark original weights tensor as unused
        _weights_reshaped.allocator()->allocate();
        _reshape_weights.run();
        _original_weights->mark_as_unused();

        // Prepare GEMM
        _is_quantized ? _mm_gemmlowp.prepare() : _mm_gemm.prepare();
        if(!_weights_reshaped.is_used())
        {
            _weights_reshaped.allocator()->free();
        }

        CLScheduler::get().queue().finish();
        _is_prepared = true;
    }
}
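
// Usage sketch (illustrative only, not part of the library source; the shapes,
// data type and single-batch setup below are hypothetical):
//
//   CLScheduler::get().default_init();
//
//   CLTensor src, weights, biases, dst;
//   src.allocator()->init(TensorInfo(TensorShape(32U, 32U, 16U), 1, DataType::F32));
//   weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 16U, 64U), 1, DataType::F32));
//   biases.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::F32));
//   dst.allocator()->init(TensorInfo(TensorShape(32U, 32U, 64U), 1, DataType::F32));
//
//   CLGEMMConvolutionLayer conv;
//   conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1));
//
//   src.allocator()->allocate();
//   weights.allocator()->allocate();
//   biases.allocator()->allocate();
//   dst.allocator()->allocate();
//
//   // ... fill src, weights and biases ...
//   conv.run(); // the first run triggers prepare() and the weights reshape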