/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h"

#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "support/ToolchainSupport.h"

#include <cmath>
#include <tuple>

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;

NEConvolutionLayerReshapeWeights::NEConvolutionLayerReshapeWeights()
    : _weights_reshape_kernel()
{
}

void NEConvolutionLayerReshapeWeights::configure(const ITensor *weights, const ITensor *biases, ITensor *output)
{
    // Perform validation step
    ARM_COMPUTE_ERROR_ON_NULLPTR(weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(NEConvolutionLayerReshapeWeights::validate(weights->info(),
                                                                          (biases != nullptr) ? biases->info() : nullptr,
                                                                          output->info()));

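    // Note: for quantized asymmetric types the bias is not appended to the reshaped weights;
    // it is consumed later as S32 values by the GEMMLowp output stage instead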
    const bool     append_biases = (biases != nullptr) && !is_data_type_quantized_asymmetric(weights->info()->data_type());
    const ITensor *biases_to_use = (append_biases) ? biases : nullptr;

    _weights_reshape_kernel.configure(weights, biases_to_use, output);

    output->info()->set_quantization_info(weights->info()->quantization_info());
}

Status NEConvolutionLayerReshapeWeights::validate(const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(weights);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    if(biases != nullptr)
    {
        const int idx_kernels = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::BATCHES);
        ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_asymmetric(weights->data_type()));
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    if((output != nullptr) && (output->total_size() != 0))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, output);

        ARM_COMPUTE_RETURN_ON_ERROR(NEWeightsReshapeKernel::validate(weights, biases, output));
    }

    return Status{};
}

void NEConvolutionLayerReshapeWeights::run()
{
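    // Split the reshape workload across threads along dimension 3 (the kernels dimension)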
    NEScheduler::get().schedule(&_weights_reshape_kernel, 3);
}

NEGEMMConvolutionLayer::NEGEMMConvolutionLayer(const std::shared_ptr<IMemoryManager> &memory_manager)
    : _memory_group(memory_manager), _reshape_weights(), _im2col_kernel(), _mm_gemm(), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(), _col2im_kernel(), _activationlayer_function(),
      _add_bias_kernel(), _reshape_layer(), _original_weights(nullptr), _im2col_output(), _weights_reshaped(), _gemm_output(), _tmp_output(), _data_layout(DataLayout::NCHW), _append_bias(false),
      _skip_im2col(false), _skip_col2im(false), _is_quantized(false), _is_activationlayer_enabled(false), _is_prepared(false)
{
}

void NEGEMMConvolutionLayer::configure_mm(const ITensor *input, const ITensor *weights, ITensor *output, int gemm_3d_depth)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
    ARM_COMPUTE_ERROR_THROW_ON(validate_mm(input->info(), weights->info(), output->info(), gemm_3d_depth, _skip_im2col));

    if(_is_quantized)
    {
        // The GEMMLowp convolution path needs negated offsets, so temporarily change the QuantizationInfo:
        // extract and negate the input and weights offsets
        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

        _mm_gemmlowp.configure(input, weights, output, GEMMInfo(false, false, true /* Reshape weights only for the first run */));

        // Restore the original QuantizationInfo, as input and weights could be used in other convolution layers
        input->info()->set_quantization_info(input_quantization_info);
        weights->info()->set_quantization_info(weights_quantization_info);
    }
    else
    {
        // Configure matrix multiply function
        _mm_gemm.configure(input, weights, nullptr, output, 1.0f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */, gemm_3d_depth,
                                                                                 _skip_im2col /* Reinterpret the input as 3D if im2col is skipped */));
    }
}

Status NEGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, int gemm_3d_depth, bool skip_im2col)
{
    const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());

    const GEMMInfo gemm_info = GEMMInfo(false, false, true /* Reshape weights only for the first run */, gemm_3d_depth, skip_im2col);
    if(is_quantized)
    {
        // The GEMMLowp path needs negated offsets, so validate against cloned infos with the
        // input and weights offsets extracted and negated
        const QuantizationInfo input_quantization_info   = input->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->quantization_info();

        std::unique_ptr<ITensorInfo> input_qa   = input->clone();
        std::unique_ptr<ITensorInfo> weights_qa = weights->clone();
        input_qa->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
        weights_qa->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

        // Perform validation step on GEMMLowp
        return NEGEMMLowpMatrixMultiplyCore::validate(input_qa.get(), weights_qa.get(), output, gemm_info);
    }
    else
    {
        // Perform validation step on Matrix multiply function
        return NEGEMM::validate(input, weights, nullptr, output, 1.0f, 0.0f, gemm_info);
    }
}

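// Checks whether the GEMM can reinterpret its output as a 3D tensor (GEMM3D) for the given
// data type and depth; this is what allows the col2im step to be skipped for NHWC. Small
// dummy shapes are sufficient since only the data type, depth and im2col flag matter here.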
Status NEGEMMConvolutionLayer::validate_gemm3d(DataType data_type, int gemm_3d_depth, bool skip_im2col)
{
    const bool         is_quantized          = is_data_type_quantized_asymmetric(data_type);
    const DataType     output_gemm_data_type = is_quantized ? DataType::S32 : data_type;
    const unsigned int mult_y                = skip_im2col ? 1U : gemm_3d_depth;
    const unsigned int mult_z                = skip_im2col ? gemm_3d_depth : 1U;

    // Set dummy tensor shapes for the validation
    const TensorInfo dummy_input_info(TensorShape(4U, 4U * mult_y, 1U * mult_z), 1, data_type);
    const TensorInfo dummy_weights_info(TensorShape(4U, 4U), 1, data_type);
    const TensorInfo dummy_output_info(TensorShape(4U, 4U, gemm_3d_depth), 1, output_gemm_data_type);

    return validate_mm(&dummy_input_info, &dummy_weights_info, &dummy_output_info, gemm_3d_depth, skip_im2col);
}

void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
                                       const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_UNUSED(num_groups);
    ARM_COMPUTE_ERROR_THROW_ON(NEGEMMConvolutionLayer::validate(input->info(),
                                                                weights->info(),
                                                                biases != nullptr ? biases->info() : nullptr,
                                                                output->info(),
                                                                conv_info,
                                                                weights_info,
                                                                dilation,
                                                                act_info,
                                                                num_groups));

    const DataType   data_type   = input->info()->data_type();
    const DataLayout data_layout = input->info()->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->info()->dimension(idx_width);
    const unsigned int kernel_height = weights->info()->dimension(idx_height);

    _is_prepared      = weights_info.retain_internal_weights();
    _original_weights = weights;
    _is_quantized     = is_data_type_quantized_asymmetric(input->info()->data_type());
    _data_layout      = data_layout;
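    // im2col can only be skipped for NHWC 1x1 kernels with unit stride (the input is already a
    // valid GEMM lhs); col2im is only ever needed for NCHW, since for NHWC the GEMM output can
    // be reinterpreted in 3D (GEMM3D) or plainly reshaped, subject to the check further below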
    _skip_im2col      = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
    _skip_col2im      = data_layout == DataLayout::NHWC;
    _append_bias      = (biases != nullptr) && (!_is_quantized);

    const ITensor *gemm_input_to_use         = input;
    ITensor       *gemm_output_to_use        = output;
    ITensor       *gemm_output_staged_to_use = output;

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;
    std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(idx_width),
                                                 input->info()->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);

    // Check if GEMM3D is supported
    if(_skip_col2im)
    {
        // If not supported, we need to perform im2col and col2im (or reshape layer)
        if(!bool(validate_gemm3d(input->info()->data_type(), conv_h, _skip_im2col)))
        {
            _skip_im2col = false;
            _skip_col2im = false;
        }
    }

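    // When the bias is appended via im2col, every im2col row carries one extra element fixed
    // to 1, so the weights matrix needs a matching extra row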
    const unsigned bias_element  = (_append_bias && !_skip_im2col) ? 1 : 0;
    const ITensor *biases_to_use = (_append_bias && !_skip_im2col) ? biases : nullptr;

    // Get parameters from conv_info
    unsigned int stride_x = 0;
    unsigned int stride_y = 0;
    std::tie(stride_x, stride_y) = conv_info.stride();

    unsigned int mat_weights_cols = weights->info()->dimension(idx_kernels);
    unsigned int mat_weights_rows = weights->info()->dimension(idx_width) * weights->info()->dimension(idx_height) * weights->info()->dimension(idx_channel) + bias_element;

    // _weights_reshaped will be auto configured in the kernel.
    // Just append biases and do not transpose 1xW as it will be reshaped in NEGEMM
    _reshape_weights.configure(weights, biases_to_use, &_weights_reshaped);

    // Create tensor to store im2col reshaped inputs
    if(!_skip_im2col)
    {
        // Calculate im2col shape
        // For NEON the batch size is on the fourth dimension
        // TODO (giaiod01): Auto-initialize the output shape of im2col COMPMID-1482
        TensorShape shape_im2col = input->info()->tensor_shape();
        shape_im2col.set(0, mat_weights_rows);
        shape_im2col.set(1, conv_w * conv_h);
        shape_im2col.set(2, 1);

        _im2col_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_im2col));
        _memory_group.manage(&_im2col_output);

        // Configure im2col kernel
        _im2col_kernel.configure(input, &_im2col_output, Size2D(kernel_width, kernel_height), conv_info, _append_bias, dilation);

        // Update GEMM input
        gemm_input_to_use = &_im2col_output;
    }
    else if(_append_bias)
    {
        // Configure add bias kernel
        _add_bias_kernel.configure(output, biases, output, ConvertPolicy::SATURATE);
    }

    // Create temporary GEMM output tensor in case we cannot skip col2im
    if(!_skip_col2im)
    {
        // Calculate GEMM output shape
        TensorShape shape_gemm = _im2col_output.info()->tensor_shape();
        shape_gemm.set(0, mat_weights_cols);
        shape_gemm.set(1, conv_w * conv_h);

        // For quantized asymmetric input the GEMM output is kept as S32, so the raw integer accumulators reach the output stage without any quantized post-processing.
        const DataType gemm_data_type = _is_quantized ? DataType::S32 : data_type;
        // FIXME: input->clone() doesn't work with subtensors for grouped convolutions.
        TensorInfo info_gemm(shape_gemm, 1, gemm_data_type);
        info_gemm.set_quantization_info(output->info()->quantization_info());
        _gemm_output.allocator()->init(info_gemm);
        _memory_group.manage(&_gemm_output);

        // Update GEMM output
        gemm_output_to_use = &_gemm_output;
    }

    // Configure GEMM
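    // When col2im is skipped, the GEMM writes straight into the final output, reinterpreting it
    // as a 3D tensor of depth conv_h (GEMM3D); otherwise a depth of 1 keeps the plain 2D path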
    configure_mm(gemm_input_to_use, &_weights_reshaped, gemm_output_to_use, _skip_col2im ? conv_h : 1);

    if(!_skip_im2col)
    {
        _im2col_output.allocator()->allocate();
    }

    // Configure output stage for quantized case
    if(_is_quantized)
    {
        const QuantizationInfo output_quant_info = (output->info()->total_size() == 0) ? input->info()->quantization_info() : output->info()->quantization_info();

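        // The output stage rescales the S32 accumulators by input_scale * weights_scale / output_scale,
        // expressed as a normalized fixed-point multiplier plus a right shift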
        float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output_quant_info.scale;
        int   output_multiplier, output_shift;
        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);

        _memory_group.manage(&_tmp_output);
        gemm_output_staged_to_use = &_tmp_output;

        _gemmlowp_output_stage.configure(gemm_output_to_use, biases, gemm_output_staged_to_use, output_multiplier, output_shift, output_quant_info.offset);
    }

    if(!_skip_col2im)
    {
        if(_data_layout == DataLayout::NCHW)
        {
            // Configure col2im
            _col2im_kernel.configure(_is_quantized ? gemm_output_staged_to_use : gemm_output_to_use, output, Size2D(conv_w, conv_h));
        }
        else
        {
            // Configure reshape layer
            _reshape_layer.configure(_is_quantized ? gemm_output_staged_to_use : gemm_output_to_use, output);
        }
    }

    if(_is_quantized)
    {
        _tmp_output.allocator()->allocate();
    }

    if(!_skip_col2im)
    {
        _gemm_output.allocator()->allocate();
    }

    ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(idx_width) != conv_w) || (output->info()->dimension(idx_height) != conv_h),
                             "Output shape does not match the expected one");

    // Configure Activation Layer
    _is_activationlayer_enabled = act_info.enabled();

    if(_is_activationlayer_enabled)
    {
        _activationlayer_function.configure(output, nullptr, act_info);
    }

    ARM_COMPUTE_UNUSED(weights_info);
}

Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                        const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!");
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups > 1, "Grouping (num_groups != 1) is not supported on NEON");

    const DataLayout data_layout = input->data_layout();
    const DataType   data_type   = input->data_type();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->dimension(idx_width);
    const unsigned int kernel_height = weights->dimension(idx_height);

    TensorInfo         im2col_reshaped_info, info_gemm, tmp_info, weights_reshaped_info;
    const ITensorInfo *gemm_input_to_use         = input;
    const ITensorInfo *gemm_output_to_use        = output;
    const ITensorInfo *gemm_output_staged_to_use = output;
    const ITensorInfo *weights_to_use            = weights;

    const bool is_quantized = is_data_type_quantized_asymmetric(data_type);
    const bool append_bias  = (biases != nullptr) && (!is_quantized);
    bool       skip_im2col  = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
    bool       skip_col2im  = data_layout == DataLayout::NHWC;

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;

    std::tie(conv_w, conv_h) = scaled_dimensions(input->dimension(idx_width),
                                                 input->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);

    // Check if GEMM3D is supported
    if(skip_col2im)
    {
        // If not supported, we need to perform im2col and col2im (or reshape layer)
        if(!bool(validate_gemm3d(input->data_type(), conv_h, skip_im2col)))
        {
            skip_im2col = false;
            skip_col2im = false;
        }
    }

    const unsigned     bias_element  = (append_bias && !skip_im2col) ? 1 : 0;
    const ITensorInfo *biases_to_use = (append_bias && !skip_im2col) ? biases : nullptr;

    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_channel) != input->dimension(idx_channel));
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    // Validate biases
    if(biases != nullptr)
    {
        if(is_quantized)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        }
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ERROR_ON(act_info.b() > act_info.a());
    }

    unsigned int mat_weights_cols = weights->dimension(idx_kernels);
    unsigned int mat_weights_rows = weights->dimension(idx_width) * weights->dimension(idx_height) * weights->dimension(idx_channel) + bias_element;

    // Output tensor auto initialization if not yet initialized
    ARM_COMPUTE_RETURN_ON_ERROR(NEConvolutionLayerReshapeWeights::validate(weights, biases_to_use, nullptr));
    weights_reshaped_info = TensorInfo(compute_weights_reshaped_shape(*weights, (append_bias && !skip_im2col)), 1, data_type);
    weights_to_use        = &weights_reshaped_info;

    if(!skip_im2col)
    {
        // Create tensor info for im2col reshaped inputs
        // For NEON the batch size is on the fourth dimension
        // TODO (giaiod01): Auto-initialize the output shape of im2col COMPMID-1482
        TensorShape shape_im2col = input->tensor_shape();
        shape_im2col.set(0, mat_weights_rows);
        shape_im2col.set(1, conv_w * conv_h);
        shape_im2col.set(2, 1);

        im2col_reshaped_info = TensorInfo(shape_im2col, 1, data_type);
        im2col_reshaped_info.set_quantization_info(input->quantization_info());

        ARM_COMPUTE_RETURN_ON_ERROR(NEIm2ColKernel::validate(input, &im2col_reshaped_info, Size2D(kernel_width, kernel_height), conv_info, append_bias, dilation));
        gemm_input_to_use = &im2col_reshaped_info;
    }
    else if(append_bias)
    {
        // Validate add bias kernel
        ARM_COMPUTE_RETURN_ON_ERROR(NEArithmeticAdditionKernel::validate(output, biases, output, ConvertPolicy::SATURATE));
    }

    // Create temporary GEMM output tensor in case we cannot skip col2im
    if(!skip_col2im)
    {
        TensorShape shape_gemm = gemm_input_to_use->tensor_shape();
        shape_gemm.set(0, mat_weights_cols);
        shape_gemm.set(1, conv_w * conv_h);
        // For quantized asymmetric input the GEMM output is kept as S32, so the raw integer accumulators reach the output stage without any quantized post-processing.
        const DataType gemm_data_type = is_quantized ? DataType::S32 : data_type;
        info_gemm = TensorInfo(shape_gemm, 1, gemm_data_type);
        info_gemm.set_quantization_info(output->quantization_info());

        gemm_output_to_use = &info_gemm;
    }

    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemm_input_to_use, weights_to_use, gemm_output_to_use, skip_col2im ? conv_h : 1, skip_im2col));

    if(is_quantized)
    {
        float multiplier = input->quantization_info().scale * weights_to_use->quantization_info().scale / output->quantization_info().scale;
        int   output_multiplier, output_shift;
        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);

        tmp_info = TensorInfo(gemm_output_to_use->tensor_shape(), 1, DataType::QASYMM8);
        tmp_info.set_quantization_info(output->quantization_info());
        gemm_output_staged_to_use = &tmp_info;

        // Validate output stage for quantized case
        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(gemm_output_to_use, biases, gemm_output_staged_to_use, output->quantization_info().offset));
    }

    // Validate Col2Im/ReshapeLayer
    if(!skip_col2im && (data_layout == DataLayout::NCHW))
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NECol2ImKernel::validate(is_quantized ? gemm_output_staged_to_use : gemm_output_to_use,
                                                             output,
                                                             Size2D(conv_w, conv_h)));
    }

    // Validate Activation Layer
    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, act_info));
    }

    return Status{};
}

void NEGEMMConvolutionLayer::run()
{
    prepare();

    _memory_group.acquire();

    if(!_skip_im2col)
    {
        // Run input reshaping
        unsigned int y_dim = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
        NEScheduler::get().schedule(&_im2col_kernel, y_dim);
    }

    // Runs NEGEMM or NEGEMMLowpMatrixMultiplyCore functions
    if(_is_quantized)
    {
        // Run gemmlowp
        _mm_gemmlowp.run();

        // Run output stage
        _gemmlowp_output_stage.run();
    }
    else
    {
        // Run gemm
        _mm_gemm.run();
    }

    if(_skip_im2col && _append_bias)
    {
        NEScheduler::get().schedule(&_add_bias_kernel, Window::DimY);
    }

    // Reshape output matrix
    if(!_skip_col2im)
    {
        if(_data_layout == DataLayout::NCHW)
        {
            NEScheduler::get().schedule(&_col2im_kernel, Window::DimY);
        }
        else
        {
            _reshape_layer.run();
        }
    }

    if(_is_activationlayer_enabled)
    {
        _activationlayer_function.run();
    }

    _memory_group.release();
}

void NEGEMMConvolutionLayer::prepare()
{
    if(!_is_prepared)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

        // Run weights reshaping and mark original weights tensor as unused
        _weights_reshaped.allocator()->allocate();
        _reshape_weights.run();
        _original_weights->mark_as_unused();

        // Prepare GEMM
        _is_quantized ? _mm_gemmlowp.prepare() : _mm_gemm.prepare();
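        // Once the GEMM has taken its own packed copy of the weights during prepare(), the
        // intermediate reshaped weights are no longer referenced and can be freed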
        if(!_weights_reshaped.is_used())
        {
            _weights_reshaped.allocator()->free();
        }

        _is_prepared = true;
    }
}