/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"

#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"

#include <cmath>
#include <memory>
#include <tuple>

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;

CLConvolutionLayerReshapeWeights::CLConvolutionLayerReshapeWeights()
    : _weights_reshape_kernel()
{
}

void CLConvolutionLayerReshapeWeights::configure(const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output)
{
    // Perform validation step
    ARM_COMPUTE_ERROR_ON_NULLPTR(weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLConvolutionLayerReshapeWeights::validate(weights->info(),
                                                                          (biases != nullptr) ? biases->info() : nullptr,
                                                                          output->info()));

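    // For quantized layers the S32 bias is consumed by the GEMMLowp output stage instead,
    // so it is never folded into the reshaped weights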
    const bool       append_biases = (biases != nullptr) && !is_data_type_quantized_asymmetric(weights->info()->data_type());
    const ICLTensor *biases_to_use = (append_biases) ? biases : nullptr;

    _weights_reshape_kernel.configure(weights, biases_to_use, output);

    output->info()->set_quantization_info(weights->info()->quantization_info());
}

Status CLConvolutionLayerReshapeWeights::validate(const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(weights);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    if(biases != nullptr)
    {
        const int idx_kernels = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::BATCHES);
        ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_asymmetric(weights->data_type()));
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    if((output != nullptr) && (output->total_size() != 0))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, output);

        ARM_COMPUTE_RETURN_ON_ERROR(CLWeightsReshapeKernel::validate(weights, biases, output));
    }

    return Status{};
}

void CLConvolutionLayerReshapeWeights::run()
{
    CLScheduler::get().enqueue(_weights_reshape_kernel);
}

CLGEMMConvolutionLayer::CLGEMMConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(memory_manager), _reshape_weights(), _im2col_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(), _col2im_kernel(), _activationlayer_function(),
      _add_bias_kernel(), _reshape_layer(), _original_weights(nullptr), _im2col_output(), _weights_reshaped(), _gemm_output(), _tmp_output(), _data_layout(DataLayout::NCHW), _append_bias(false),
      _skip_im2col(false), _is_quantized(false), _is_activationlayer_enabled(false), _is_prepared(false)
{
}

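// Illustrative usage sketch (not part of the library; tensor names and setup are
// assumptions, and the tensors must be initialized and allocated elsewhere):
//
//   CLGEMMConvolutionLayer conv;
//   conv.configure(&src, &weights, &biases, &dst,
//                  PadStrideInfo(1, 1, 0, 0),     // stride (1,1), no padding
//                  WeightsInfo(), Size2D(1U, 1U), // weights not pre-reshaped, no dilation
//                  ActivationLayerInfo());        // no fused activation
//   conv.run(); // run() calls prepare() once to reshape the weights up front
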
void CLGEMMConvolutionLayer::configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, int gemm_3d_depth)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
    ARM_COMPUTE_ERROR_THROW_ON(validate_mm(input->info(), weights->info(), output->info(), gemm_3d_depth, _skip_im2col));

    if(_is_quantized)
    {
        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
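        // (The GEMMLowp core follows the gemmlowp convention of adding the stored offsets
        // to the raw values, so passing -offset yields the (quantized - offset) term that
        // each accumulator of the convolution needs.)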
        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

        _mm_gemmlowp.configure(input, weights, output, GEMMInfo(false, false, true /* Reshape weights only for the first run*/));

        // Restore the original QuantizationInfo, as input and weights could be used in other convolution layers
        input->info()->set_quantization_info(input_quantization_info);
        weights->info()->set_quantization_info(weights_quantization_info);
    }
    else
    {
        // Configure matrix multiply function
        _mm_gemm.configure(input, weights, nullptr, output, 1.0f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run*/, gemm_3d_depth,
                                                                                 _skip_im2col /* Reinterpret the input as 3D if im2col is skipped */));
    }
}

Status CLGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, int gemm_3d_depth, bool skip_im2col)
{
    const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());

    const GEMMInfo &gemm_info = GEMMInfo(false, false, true /* Reshape weights only for the first run */, gemm_3d_depth, skip_im2col /* Reinterpret the input as 3D if im2col is skipped */);
    if(is_quantized)
    {
        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info   = input->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->quantization_info();

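        // Work on clones so this validation step leaves the caller's tensor infos untouched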
        std::unique_ptr<ITensorInfo> input_qa   = input->clone();
        std::unique_ptr<ITensorInfo> weights_qa = weights->clone();
        input_qa->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
        weights_qa->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

        // Perform validation step on GEMMLowp
        return CLGEMMLowpMatrixMultiplyCore::validate(input_qa.get(), weights_qa.get(), output, gemm_info);
    }
    else
    {
        // Perform validation step on Matrix multiply function
        return CLGEMM::validate(input, weights, nullptr, output, 1.0f, 0.0f, gemm_info);
    }
}

void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
                                       const Size2D &dilation, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

    ARM_COMPUTE_ERROR_THROW_ON(CLGEMMConvolutionLayer::validate(input->info(),
                                                                weights->info(),
                                                                biases != nullptr ? biases->info() : nullptr,
                                                                output->info(),
                                                                conv_info,
                                                                weights_info,
                                                                dilation,
                                                                act_info));

    const DataType   data_type   = input->info()->data_type();
    const DataLayout data_layout = input->info()->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->info()->dimension(idx_width);
    const unsigned int kernel_height = weights->info()->dimension(idx_height);

    _is_prepared      = weights_info.retain_internal_weights();
    _original_weights = weights;
    _is_quantized     = is_data_type_quantized_asymmetric(input->info()->data_type());
    _data_layout      = data_layout;
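    // im2col can be skipped for 1x1 NHWC convolutions with unit stride, since every input row is
    // already laid out as a valid GEMM row; the quantized path still goes through im2col here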
    _skip_im2col      = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1) && !_is_quantized;
    _append_bias      = (biases != nullptr) && (!_is_quantized);

    // Set the GPU target for im2col and col2im
    _im2col_kernel.set_target(CLScheduler::get().target());
    _col2im_kernel.set_target(CLScheduler::get().target());

    bool             is_nhwc                   = _data_layout == DataLayout::NHWC;
    const ICLTensor *gemm_input_to_use         = input;
    ICLTensor       *gemm_output_to_use        = output;
    ICLTensor       *gemm_output_staged_to_use = output;

    const ICLTensor *biases_to_use = (_append_bias && !_skip_im2col) ? biases : nullptr;

    // Get parameters from conv_info
    unsigned int stride_x = 0;
    unsigned int stride_y = 0;
    std::tie(stride_x, stride_y) = conv_info.stride();

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;
    std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(idx_width),
                                                 input->info()->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);

    unsigned int mat_weights_cols = weights->info()->dimension(idx_kernels);

    // _weights_reshaped will be auto configured in the kernel.
    // Just append biases and do not transpose 1xW as it will be reshaped in CLGEMM
    _reshape_weights.configure(weights, biases_to_use, &_weights_reshaped);

    weights = &_weights_reshaped;

    // Create tensor to store im2col reshaped inputs
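    // (im2col unrolls each convolution window into one row of a 2D matrix, turning the whole
    // convolution into a single GEMM against the reshaped weights)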
    if(!_skip_im2col)
    {
        _memory_group.manage(&_im2col_output);

        // Configure and tune im2col. im2col output shape is auto-initialized
        _im2col_kernel.configure(input, &_im2col_output, Size2D(kernel_width, kernel_height), conv_info, _append_bias, dilation);

        // Set quantization info
        _im2col_output.info()->set_quantization_info(input->info()->quantization_info());
        CLScheduler::get().tune_kernel_static(_im2col_kernel);

        // Update GEMM input
        gemm_input_to_use = &_im2col_output;
    }
    else if(_append_bias)
    {
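        // With im2col skipped the bias cannot be appended to the reshaped input, so it is
        // applied as a separate elementwise addition after the GEMM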
        // Configure add bias kernel
        _add_bias_kernel.configure(output, biases, output, ConvertPolicy::SATURATE);
    }

    // Create GEMM output tensor
    if(!is_nhwc || _is_quantized)
    {
        // Calculate GEMM output shape
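        // One row per output pixel (conv_w * conv_h) and one column per kernel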
        TensorShape shape_gemm = _im2col_output.info()->tensor_shape();
        shape_gemm.set(0, mat_weights_cols);
        shape_gemm.set(1, conv_w * conv_h);

        // The GEMM output must be S32 for quantized asymmetric input, so the raw integer accumulators are kept until the quantized output stage runs.
        const DataType gemm_data_type = _is_quantized ? DataType::S32 : data_type;
        // FIXME: input->clone() doesn't work with subtensors for grouped convolutions.
        TensorInfo info_gemm(shape_gemm, 1, gemm_data_type);
        info_gemm.set_quantization_info(output->info()->quantization_info());
        _gemm_output.allocator()->init(info_gemm);
        _memory_group.manage(&_gemm_output);

        // Update GEMM output
        gemm_output_to_use = &_gemm_output;
    }

    // Configure and tune GEMM
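    // For NHWC the GEMM output is reinterpreted as 3D (gemm_3d_depth = conv_h), which is what
    // lets the non-quantized NHWC path write straight to the output without a col2im pass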
    configure_mm(gemm_input_to_use, weights, gemm_output_to_use, (data_layout == DataLayout::NHWC) ? conv_h : 1);

    if(!_skip_im2col)
    {
        _im2col_output.allocator()->allocate();
    }

    // Configure output stage for quantized case
    if(_is_quantized)
    {
        const QuantizationInfo output_quant_info = (output->info()->total_size() == 0) ? input->info()->quantization_info() : output->info()->quantization_info();

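        // Requantization factor: M = (input_scale * weights_scale) / output_scale, decomposed
        // into a normalized fixed-point multiplier and a right shift (M = M0 * 2^-shift with
        // M0 in [0.5, 1)), as expected by the fixed-point output stage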
        float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output_quant_info.scale;
        int   output_multiplier, output_shift;
        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);

        _memory_group.manage(&_tmp_output);
        gemm_output_staged_to_use = &_tmp_output;

        _gemmlowp_output_stage.configure(gemm_output_to_use, biases, gemm_output_staged_to_use, output_multiplier, output_shift, output_quant_info.offset);
    }

    if(!is_nhwc || _is_quantized)
    {
        if(input->info()->data_layout() == DataLayout::NCHW)
        {
            // Configure and tune Col2Im
            _col2im_kernel.configure(_is_quantized ? gemm_output_staged_to_use : gemm_output_to_use, output, std::make_pair(conv_w, conv_h));
            CLScheduler::get().tune_kernel_static(_col2im_kernel);
        }
        else
        {
            // Configure reshape layer
            _reshape_layer.configure(_is_quantized ? gemm_output_staged_to_use : gemm_output_to_use, output);
        }
    }

    if(!is_nhwc || _is_quantized)
    {
        _tmp_output.allocator()->allocate();
        _gemm_output.allocator()->allocate();
    }

    ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(idx_width) != conv_w) || (output->info()->dimension(idx_height) != conv_h),
                             "Output shape does not match the expected one");

    // Configure Activation Layer
    _is_activationlayer_enabled = act_info.enabled();

    if(_is_activationlayer_enabled)
    {
        _activationlayer_function.configure(output, nullptr, act_info);
    }

    ARM_COMPUTE_UNUSED(weights_info);
}

Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                        const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!");
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);

    const DataLayout data_layout = input->data_layout();
    const DataType   data_type   = input->data_type();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->dimension(idx_width);
    const unsigned int kernel_height = weights->dimension(idx_height);

    TensorInfo         im2col_reshaped_info, info_gemm, tmp_info, weights_reshaped_info;
    const ITensorInfo *gemm_input_to_use         = input;
    const ITensorInfo *gemm_output_to_use        = output;
    const ITensorInfo *gemm_output_staged_to_use = output;
    const ITensorInfo *weights_to_use            = weights;

    const bool is_nhwc      = data_layout == DataLayout::NHWC;
    const bool is_quantized = is_data_type_quantized_asymmetric(data_type);
    const bool skip_im2col  = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1) && !is_quantized;
    const bool append_bias  = (biases != nullptr) && (!is_quantized);

    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_channel) != input->dimension(idx_channel));
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    // Validate biases
    if(biases != nullptr)
    {
        if(is_quantized)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        }
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    if(act_info.enabled())
    {
        ARM_COMPUTE_ERROR_ON(act_info.b() > act_info.a());
    }

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;

    std::tie(conv_w, conv_h) = scaled_dimensions(input->dimension(idx_width),
                                                 input->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);

    unsigned int mat_weights_cols = weights->dimension(idx_kernels);

    // Output tensor auto-initialization if not yet initialized
    ARM_COMPUTE_RETURN_ON_ERROR(CLConvolutionLayerReshapeWeights::validate(weights, is_quantized ? nullptr : biases, nullptr));
    weights_reshaped_info = TensorInfo(compute_weights_reshaped_shape(*weights, (append_bias && !skip_im2col)), 1, data_type);
    weights_to_use        = &weights_reshaped_info;

    if(!skip_im2col)
    {
        const Size2D kernel_dims(kernel_width, kernel_height);

        // Output tensor auto-initialization if not yet initialized
        TensorShape expected_output_shape = compute_im2col_conv_shape(input, kernel_dims, conv_info, append_bias, dilation, true);

        auto_init_if_empty(im2col_reshaped_info, input->clone()->set_tensor_shape(expected_output_shape));

        ARM_COMPUTE_RETURN_ON_ERROR(CLIm2ColKernel::validate(input, &im2col_reshaped_info, kernel_dims, conv_info, append_bias, dilation));
        gemm_input_to_use = &im2col_reshaped_info;
    }
    else if(append_bias)
    {
        // Validate add bias kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticAdditionKernel::validate(output, biases, output, ConvertPolicy::SATURATE));
    }

    // Create GEMM output tensor
    if(!is_nhwc || is_quantized)
    {
        TensorShape shape_gemm = gemm_input_to_use->tensor_shape();
        shape_gemm.set(0, mat_weights_cols);
        shape_gemm.set(1, conv_w * conv_h);
        const DataType gemm_data_type = is_quantized ? DataType::S32 : data_type;
        // The GEMM output must be S32 for quantized asymmetric input, so the raw integer accumulators are kept until the quantized output stage runs.
        info_gemm = TensorInfo(shape_gemm, 1, gemm_data_type);
        info_gemm.set_quantization_info(output->quantization_info());
        gemm_output_to_use = &info_gemm;
    }

    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemm_input_to_use, weights_to_use, gemm_output_to_use, (data_layout == DataLayout::NHWC) ? conv_h : 1, skip_im2col));

    if(is_quantized)
    {
        float multiplier = input->quantization_info().scale * weights_to_use->quantization_info().scale / output->quantization_info().scale;
        int   output_multiplier, output_shift;
        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);

        tmp_info = TensorInfo(gemm_output_to_use->tensor_shape(), 1, DataType::QASYMM8);
        tmp_info.set_quantization_info(output->quantization_info());
        gemm_output_staged_to_use = &tmp_info;

        // Validate output stage for quantized case
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(gemm_output_to_use, biases, gemm_output_staged_to_use, output->quantization_info().offset));
    }

    // Validate Col2Im
    if(!is_nhwc || is_quantized)
    {
        if(input->data_layout() == DataLayout::NCHW)
        {
            ARM_COMPUTE_RETURN_ON_ERROR(CLCol2ImKernel::validate(is_quantized ? gemm_output_staged_to_use : gemm_output_to_use,
                                                                 output,
                                                                 std::make_pair(conv_w, conv_h)));
        }
    }

    // Validate Activation Layer
    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayer::validate(output, nullptr, act_info));
    }

    return Status{};
}

void CLGEMMConvolutionLayer::run()
{
    prepare();

    _memory_group.acquire();

    // Run im2col
    if(!_skip_im2col)
    {
        CLScheduler::get().enqueue(_im2col_kernel);
    }

    // Runs CLGEMM or CLGEMMLowpMatrixMultiplyCore functions
    if(_is_quantized)
    {
        // Run gemmlowp
        _mm_gemmlowp.run();

        // Run output stage
        _gemmlowp_output_stage.run();
    }
    else
    {
        // Run gemm
        _mm_gemm.run();
    }

    if(_skip_im2col && _append_bias)
    {
        CLScheduler::get().enqueue(_add_bias_kernel);
    }

    // Reshape output matrix
    if(_data_layout == DataLayout::NCHW || _is_quantized)
    {
        if(_data_layout == DataLayout::NCHW)
        {
            CLScheduler::get().enqueue(_col2im_kernel, false);
        }
        else
        {
            _reshape_layer.run();
        }
    }

    // Run Activation Layer if enabled
    if(_is_activationlayer_enabled)
    {
        _activationlayer_function.run();
    }

    _memory_group.release();
}

void CLGEMMConvolutionLayer::prepare()
{
    if(!_is_prepared)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

        // Run weights reshaping and mark original weights tensor as unused
        _weights_reshaped.allocator()->allocate();
        _reshape_weights.run();
        _original_weights->mark_as_unused();

        // Prepare GEMM
        _is_quantized ? _mm_gemmlowp.prepare() : _mm_gemm.prepare();
        if(!_weights_reshaped.is_used())
        {
            _weights_reshaped.allocator()->free();
        }

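        // Block until the queued device-side work (the weight reshaping) has completed
        // before marking the layer as prepared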
        CLScheduler::get().queue().finish();
        _is_prepared = true;
    }
}