/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h"

#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "support/ToolchainSupport.h"

#include <cmath>
#include <set>
#include <tuple>

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;

NEConvolutionLayerReshapeWeights::NEConvolutionLayerReshapeWeights()
    : _weights_reshape_kernel()
{
}

void NEConvolutionLayerReshapeWeights::configure(const ITensor *weights, const ITensor *biases, ITensor *output)
{
    // Perform validation step
    ARM_COMPUTE_ERROR_ON_NULLPTR(weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(NEConvolutionLayerReshapeWeights::validate(weights->info(),
                                                                          (biases != nullptr) ? biases->info() : nullptr,
                                                                          output->info()));

    const bool     append_biases = (biases != nullptr) && !is_data_type_quantized_asymmetric(weights->info()->data_type());
    const ITensor *biases_to_use = (append_biases) ? biases : nullptr;

    _weights_reshape_kernel.configure(weights, biases_to_use, output);

    output->info()->set_quantization_info(weights->info()->quantization_info());
}

Status NEConvolutionLayerReshapeWeights::validate(const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(weights);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    if(biases != nullptr)
    {
        const int idx_kernels = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::BATCHES);
        ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_asymmetric(weights->data_type()));
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    if((output != nullptr) && (output->total_size() != 0))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, output);

        ARM_COMPUTE_RETURN_ON_ERROR(NEWeightsReshapeKernel::validate(weights, biases, output));
    }

    return Status{};
}

void NEConvolutionLayerReshapeWeights::run()
{
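    // Split the reshape workload across threads on dimension 3 (the kernels dimension of the weights)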
    NEScheduler::get().schedule(&_weights_reshape_kernel, 3);
}

NEGEMMConvolutionLayer::NEGEMMConvolutionLayer(const std::shared_ptr<IMemoryManager> &memory_manager)
    : _memory_group(memory_manager), _reshape_weights(), _im2col_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _col2im_kernel(), _activationlayer_function(), _add_bias_kernel(),
      _reshape_layer(), _original_weights(nullptr), _im2col_output(), _weights_reshaped(), _gemm_output(), _tmp_output(), _data_layout(DataLayout::NCHW), _append_bias(false), _skip_im2col(false),
      _skip_col2im(false), _is_quantized(false), _is_activationlayer_enabled(false), _is_prepared(false)
{
}

void NEGEMMConvolutionLayer::configure_mm(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const ActivationLayerInfo &act_info, int gemm_3d_depth)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
    ARM_COMPUTE_ERROR_THROW_ON(validate_mm(input->info(), weights->info(), biases == nullptr ? nullptr : biases->info(), output == nullptr ? nullptr : output->info(), act_info, gemm_3d_depth,
                                           _skip_im2col));

    const GEMMInfo &gemm_info = GEMMInfo(false, false, true /* Reshape weights only for the first run */,
                                         gemm_3d_depth, _skip_im2col /* Reinterpret the input as 3D if im2col is skipped */);

    if(_is_quantized)
    {
        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

        const QuantizationInfo output_quant_info = (output->info()->total_size() == 0) ? input_quantization_info : output->info()->quantization_info();

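        // Express the real requantization scale (input_scale * weights_scale / output_scale),
        // expected to be less than one, as a normalized fixed-point multiplier plus a right shift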
        float multiplier = input_quantization_info.scale * weights->info()->quantization_info().scale / output_quant_info.scale;
        int   output_multiplier, output_shift;
        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);

        // Merge activation with output stage
        int min_activation = 0;
        int max_activation = 0;

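        // ReLU-family activations can be folded into the GEMMLowp output stage by clamping
        // the requantized result to the activation bounds, saving a separate activation pass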
        const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
                                                                                 };
        if(_is_activationlayer_enabled && supported_acts.count(act_info.activation()) != 0)
        {
            const int a_const_int = output_quant_info.quantize(act_info.a(), RoundingPolicy::TO_NEAREST_UP);
            const int b_const_int = output_quant_info.quantize(act_info.b(), RoundingPolicy::TO_NEAREST_UP);

            min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? output_quant_info.offset : b_const_int;
            max_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? 255 : a_const_int;

            _is_activationlayer_enabled = false;
        }

        GEMMLowpOutputStageInfo output_info;
        output_info.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
        output_info.gemmlowp_offset     = output_quant_info.offset;
        output_info.gemmlowp_multiplier = output_multiplier;
        output_info.gemmlowp_shift      = output_shift;
        output_info.gemmlowp_min_bound  = min_activation;
        output_info.gemmlowp_max_bound  = max_activation;

        _mm_gemmlowp.configure(input, weights, biases, output, GEMMInfo(false, false, true, gemm_3d_depth, _skip_im2col, false, output_info));

        // Revert back QuantizationInfo as input and weights could be used in other convolution layers
        input->info()->set_quantization_info(input_quantization_info);
        weights->info()->set_quantization_info(weights_quantization_info);
    }
    else
    {
        // Configure matrix multiply function
        _mm_gemm.configure(input, weights, nullptr, output, 1.0f, 0.0f, gemm_info);
    }
}

Status NEGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const ActivationLayerInfo &act_info,
                                           int gemm_3d_depth, bool skip_im2col)
{
    const bool is_quantized          = is_data_type_quantized_asymmetric(input->data_type());
    const bool is_activation_enabled = act_info.enabled();

    const GEMMInfo &gemm_info = GEMMInfo(false, false, true /* Reshape weights only for the first run */,
                                         gemm_3d_depth, skip_im2col /* Reinterpret the input as 3D if im2col is skipped */);
    if(is_quantized)
    {
        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info   = input->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->quantization_info();

        std::unique_ptr<ITensorInfo> input_qa   = input->clone();
        std::unique_ptr<ITensorInfo> weights_qa = weights->clone();
        input_qa->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
        weights_qa->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

        const QuantizationInfo output_quant_info = (output->total_size() == 0) ? input_quantization_info : output->quantization_info();

        float multiplier = input_quantization_info.scale * weights->quantization_info().scale / output_quant_info.scale;
        int   output_multiplier, output_shift;
        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);

        // Merge activation with output stage
        int min_activation = 0;
        int max_activation = 0;

        const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
                                                                                 };
        if(is_activation_enabled && supported_acts.count(act_info.activation()) != 0)
        {
            const int a_const_int = output_quant_info.quantize(act_info.a(), RoundingPolicy::TO_NEAREST_UP);
            const int b_const_int = output_quant_info.quantize(act_info.b(), RoundingPolicy::TO_NEAREST_UP);

            min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? output_quant_info.offset : b_const_int;
            max_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? 255 : a_const_int;
        }

        GEMMLowpOutputStageInfo output_info;
        output_info.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
        output_info.gemmlowp_offset     = output_quant_info.offset;
        output_info.gemmlowp_multiplier = output_multiplier;
        output_info.gemmlowp_shift      = output_shift;
        output_info.gemmlowp_min_bound  = min_activation;
        output_info.gemmlowp_max_bound  = max_activation;

        // Perform validation step on GEMMLowp
        return NEGEMMLowpMatrixMultiplyCore::validate(input_qa.get(), weights_qa.get(), biases, output, GEMMInfo(false, false, true, gemm_3d_depth, skip_im2col, false, output_info));
    }
    else
    {
        // Perform validation step on Matrix multiply function
        return NEGEMM::validate(input, weights, nullptr, output, 1.0f, 0.0f, gemm_info);
    }
}

Status NEGEMMConvolutionLayer::validate_gemm3d(const ITensorInfo *input_info, const ActivationLayerInfo &act_info, int gemm_3d_depth, bool skip_im2col)
{
    const DataType     data_type = input_info->data_type();
    const unsigned int mult_y    = skip_im2col ? 1U : gemm_3d_depth;
    const unsigned int mult_z    = skip_im2col ? gemm_3d_depth : 1U;
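    // When im2col is skipped the input is already 3D (depth on the z dimension); otherwise the
    // 3D depth is folded into the y dimension of the 2D GEMM input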

    // Set dummy tensor shapes for the validation
    const TensorInfo dummy_input_info(TensorShape(4U, 4U * mult_y, 1U * mult_z), 1, data_type, input_info->quantization_info());
    const TensorInfo dummy_weights_info(TensorShape(4U, 4U), 1, data_type);
    const TensorInfo dummy_output_info(TensorShape(4U, 4U, gemm_3d_depth), 1, data_type, input_info->quantization_info());

    return validate_mm(&dummy_input_info, &dummy_weights_info, nullptr, &dummy_output_info, act_info, gemm_3d_depth, skip_im2col);
}

void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
                                       const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_UNUSED(num_groups);
    ARM_COMPUTE_ERROR_THROW_ON(NEGEMMConvolutionLayer::validate(input->info(),
                                                                weights->info(),
                                                                biases != nullptr ? biases->info() : nullptr,
                                                                output->info(),
                                                                conv_info,
                                                                weights_info,
                                                                dilation,
                                                                act_info,
                                                                num_groups));

    const DataType   data_type   = input->info()->data_type();
    const DataLayout data_layout = input->info()->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->info()->dimension(idx_width);
    const unsigned int kernel_height = weights->info()->dimension(idx_height);

    _is_prepared      = weights_info.retain_internal_weights();
    _original_weights = weights;
    _is_quantized     = is_data_type_quantized_asymmetric(input->info()->data_type());
    _data_layout      = data_layout;
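    // For 1x1 kernels with unit stride in NHWC, the input is already laid out as the im2col
    // matrix (one row of channels per output pixel), so im2col can be skipped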
    _skip_im2col                = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
    _append_bias                = (biases != nullptr) && (!_is_quantized);
    _is_activationlayer_enabled = act_info.enabled();

    const ITensor *gemm_input_to_use  = input;
    ITensor       *gemm_output_to_use = output;

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;
    std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(idx_width),
                                                 input->info()->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);

    // Check if GEMM3D is supported
    if(data_layout == DataLayout::NHWC)
    {
        _skip_col2im = bool(validate_gemm3d(input->info(), act_info, conv_h, true));
        // If not supported, we need to perform im2col and col2im (or reshape layer)
        if(!_skip_col2im)
        {
            _skip_im2col = false;
        }
    }
    else
    {
        _skip_col2im = false;
    }

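    // When im2col runs in the non-quantized case, biases are appended to the reshaped weights
    // and matched by the extra row of 1s that im2col adds; otherwise a separate bias-add kernel is used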
    const ITensor *biases_to_use = (_append_bias && !_skip_im2col) ? biases : nullptr;

    // Get parameters from conv_info
    unsigned int stride_x = 0;
    unsigned int stride_y = 0;
    std::tie(stride_x, stride_y) = conv_info.stride();

    unsigned int mat_weights_cols = weights->info()->dimension(idx_kernels);

    // _weights_reshaped will be auto configured in the kernel.
    // Just append biases and do not transpose 1xW as it will be reshaped in NEGEMM
    _reshape_weights.configure(weights, biases_to_use, &_weights_reshaped);

    // Create tensor to store im2col reshaped inputs
    if(!_skip_im2col)
    {
        _memory_group.manage(&_im2col_output);

        // Configure im2col
        _im2col_kernel.configure(input, &_im2col_output, Size2D(kernel_width, kernel_height), conv_info, _append_bias, dilation);

        // Update GEMM input
        gemm_input_to_use = &_im2col_output;
    }
    else if(_append_bias)
    {
        // Configure add bias kernel
        _add_bias_kernel.configure(output, biases, output, ConvertPolicy::SATURATE);
    }

    // Create temporary GEMM output tensor in case we cannot skip col2im
    if(!_skip_col2im)
    {
        TensorShape shape_gemm;

        // Calculate GEMM output shape
        shape_gemm = _im2col_output.info()->tensor_shape();
        shape_gemm.set(0, mat_weights_cols);
        shape_gemm.set(1, conv_w * conv_h);

        // FIXME: input->clone() doesn't work with subtensors for grouped convolutions.
        TensorInfo info_gemm(shape_gemm, 1, data_type);
        info_gemm.set_quantization_info(output->info()->quantization_info()).set_data_layout(input->info()->data_layout());
        _gemm_output.allocator()->init(info_gemm);
        _memory_group.manage(&_gemm_output);

        // Update GEMM output
        gemm_output_to_use = &_gemm_output;
    }

    // Configure GEMM
    // In case we need to skip col2im, GEMM3D (gemm_3d_depth != 0) must be called in order to avoid reshaping the output matrix
    const unsigned int gemm_3d_depth = _skip_col2im ? conv_h : 0;
    configure_mm(gemm_input_to_use, &_weights_reshaped, biases, gemm_output_to_use, act_info, gemm_3d_depth);

    if(!_skip_im2col)
    {
        _im2col_output.allocator()->allocate();
    }

    if(!_skip_col2im)
    {
        if(_data_layout == DataLayout::NCHW)
        {
            // Configure col2im
            _col2im_kernel.configure(gemm_output_to_use, output, Size2D(conv_w, conv_h));
        }
        else
        {
            // Configure reshape layer
            _reshape_layer.configure(gemm_output_to_use, output);
        }
    }

    if(_is_quantized && !_skip_col2im)
    {
        _tmp_output.allocator()->allocate();
    }

    if(!_skip_col2im || _is_quantized)
    {
        _gemm_output.allocator()->allocate();
    }

    ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(idx_width) != conv_w) || (output->info()->dimension(idx_height) != conv_h),
                             "Output shape does not match the expected one");

    // Configure Activation Layer
    if(_is_activationlayer_enabled)
    {
        _activationlayer_function.configure(output, nullptr, act_info);
    }

    ARM_COMPUTE_UNUSED(weights_info);
}

Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                        const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!");
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups > 1, "Grouping (num_groups != 1) is not supported on NEON");

    const DataLayout data_layout = input->data_layout();
    const DataType   data_type   = input->data_type();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->dimension(idx_width);
    const unsigned int kernel_height = weights->dimension(idx_height);

    TensorInfo         im2col_reshaped_info, info_gemm, tmp_info, weights_reshaped_info;
    const ITensorInfo *gemm_input_to_use  = input;
    const ITensorInfo *gemm_output_to_use = output;
    const ITensorInfo *weights_to_use     = weights;

    const bool is_quantized          = is_data_type_quantized_asymmetric(data_type);
    const bool append_bias           = (biases != nullptr) && (!is_quantized);
    bool       skip_im2col           = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
    bool       is_activation_enabled = act_info.enabled();

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;

    std::tie(conv_w, conv_h) = scaled_dimensions(input->dimension(idx_width),
                                                 input->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);

    // Check if GEMM3D is supported
    bool skip_col2im = false;
    if(data_layout == DataLayout::NHWC)
    {
        skip_col2im = bool(validate_gemm3d(input, act_info, conv_h, true));
        // If not supported, we need to perform im2col and col2im (or reshape layer)
        if(!skip_col2im)
        {
            skip_im2col = false;
        }
    }

    if(skip_col2im)
    {
        // If not supported, we need to perform im2col and col2im (or reshape layer)
        if(!bool(validate_gemm3d(input, act_info, conv_h, skip_im2col)))
        {
            skip_im2col = false;
            skip_col2im = false;
        }
    }

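    // If biases are appended via im2col, each im2col row gains one trailing 1, so the weights
    // matrix needs a matching extra row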
    const unsigned int bias_element  = (append_bias && !skip_im2col) ? 1 : 0;
    const ITensorInfo *biases_to_use = (append_bias && !skip_im2col) ? biases : nullptr;

    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_channel) != input->dimension(idx_channel));
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    // Validate biases
    if(biases != nullptr)
    {
        if(is_quantized)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        }
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ERROR_ON(act_info.b() > act_info.a());
    }

    unsigned int mat_weights_cols = weights->dimension(idx_kernels);
    unsigned int mat_weights_rows = weights->dimension(idx_width) * weights->dimension(idx_height) * weights->dimension(idx_channel) + bias_element;

    // Output tensor auto initialization if not yet initialized
    ARM_COMPUTE_RETURN_ON_ERROR(NEConvolutionLayerReshapeWeights::validate(weights, biases_to_use, nullptr));
    weights_reshaped_info = TensorInfo(compute_weights_reshaped_shape(*weights, (append_bias && !skip_im2col)), 1, data_type);
    weights_to_use        = &weights_reshaped_info;

    if(!skip_im2col)
    {
        // Create tensor info for im2col reshaped inputs
        // For NEON the batch size is on the fourth dimension
        // TODO (giaiod01): Auto-initialize the output shape of im2col COMPMID-1482
        TensorShape shape_im2col = input->tensor_shape();
        shape_im2col.set(0, mat_weights_rows);
        shape_im2col.set(1, conv_w * conv_h);
        shape_im2col.set(2, 1);

        im2col_reshaped_info = TensorInfo(shape_im2col, 1, data_type);
        im2col_reshaped_info.set_quantization_info(input->quantization_info());

        ARM_COMPUTE_RETURN_ON_ERROR(NEIm2ColKernel::validate(input, &im2col_reshaped_info, Size2D(kernel_width, kernel_height), conv_info, append_bias, dilation));
        gemm_input_to_use = &im2col_reshaped_info;
    }
    else if(append_bias)
    {
        // Validate add bias kernel
        ARM_COMPUTE_RETURN_ON_ERROR(NEArithmeticAdditionKernel::validate(output, biases, output, ConvertPolicy::SATURATE));
    }

    // Create temporary GEMM output tensor in case we cannot skip col2im
    if(!skip_col2im)
    {
        TensorShape shape_gemm = gemm_input_to_use->tensor_shape();
        shape_gemm.set(0, mat_weights_cols);
        shape_gemm.set(1, conv_w * conv_h);
        info_gemm = TensorInfo(shape_gemm, 1, data_type);
    }
    else
    {
        info_gemm = TensorInfo(output->tensor_shape(), 1, data_type);
    }
    info_gemm.set_quantization_info(output->quantization_info()).set_data_layout(input->data_layout());
    gemm_output_to_use = &info_gemm;
    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemm_input_to_use, weights_to_use, biases, gemm_output_to_use, act_info, skip_col2im ? conv_h : 0, skip_im2col));

    // Validate Col2Im/ReshapeLayer
    if(!skip_col2im && (data_layout == DataLayout::NCHW))
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NECol2ImKernel::validate(gemm_output_to_use, output, Size2D(conv_w, conv_h)));
    }

    // Validate Activation Layer
    if(is_activation_enabled)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, act_info));
    }

    return Status{};
}

void NEGEMMConvolutionLayer::run()
{
    prepare();

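    // Acquire the memory-group-managed intermediate tensors for the duration of this run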
    MemoryGroupResourceScope scope_mg(_memory_group);

    if(!_skip_im2col)
    {
        // Run input reshaping
        unsigned int y_dim = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
        NEScheduler::get().schedule(&_im2col_kernel, y_dim);
    }

    // Runs NEGEMM or NEGEMMLowpMatrixMultiplyCore functions
    if(_is_quantized)
    {
        // Run gemmlowp
        _mm_gemmlowp.run();
    }
    else
    {
        // Run gemm
        _mm_gemm.run();
    }

    if(_skip_im2col && _append_bias)
    {
        NEScheduler::get().schedule(&_add_bias_kernel, Window::DimY);
    }

    // Reshape output matrix
    if(!_skip_col2im)
    {
        if(_data_layout == DataLayout::NCHW)
        {
            NEScheduler::get().schedule(&_col2im_kernel, Window::DimY);
        }
        else
        {
            _reshape_layer.run();
        }
    }

    if(_is_activationlayer_enabled)
    {
        _activationlayer_function.run();
    }
}

void NEGEMMConvolutionLayer::prepare()
{
    if(!_is_prepared)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

        // Run weights reshaping and mark original weights tensor as unused
        _weights_reshaped.allocator()->allocate();
        _reshape_weights.run();
        _original_weights->mark_as_unused();

        // Prepare GEMM
        _is_quantized ? _mm_gemmlowp.prepare() : _mm_gemm.prepare();
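        // The GEMM functions may have packed the reshaped weights into their own internal
        // buffers during prepare(), in which case the intermediate tensor can be freed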
        if(!_weights_reshaped.is_used())
        {
            _weights_reshaped.allocator()->free();
        }

        _is_prepared = true;
    }
}