/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"

#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"

#include <cmath>
#include <memory>
#include <tuple>

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;

CLConvolutionLayerReshapeWeights::CLConvolutionLayerReshapeWeights()
    : _weights_reshape_kernel()
{
}

void CLConvolutionLayerReshapeWeights::configure(const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups)
{
    // Perform validation step
    ARM_COMPUTE_ERROR_ON_NULLPTR(weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLConvolutionLayerReshapeWeights::validate(weights->info(),
                                                                          (biases != nullptr) ? biases->info() : nullptr,
                                                                          output->info(),
                                                                          num_groups));

    const bool       append_biases = (biases != nullptr) && !is_data_type_quantized_asymmetric(weights->info()->data_type());
    const ICLTensor *biases_to_use = (append_biases) ? biases : nullptr;

    _weights_reshape_kernel.configure(weights, biases_to_use, output, num_groups);

    output->info()->set_quantization_info(weights->info()->quantization_info());
}

Status CLConvolutionLayerReshapeWeights::validate(const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, unsigned int num_groups)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(weights);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    if(biases != nullptr)
    {
        const int idx_kernels = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::BATCHES);
        ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_asymmetric(weights->data_type()));
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    if((output != nullptr) && (output->total_size() != 0))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, output);

        ARM_COMPUTE_RETURN_ON_ERROR(CLWeightsReshapeKernel::validate(weights, biases, output, num_groups));
    }

    return Status{};
}

void CLConvolutionLayerReshapeWeights::run()
{
    CLScheduler::get().enqueue(_weights_reshape_kernel);
}

CLGEMMConvolutionLayer::CLGEMMConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(memory_manager), _reshape_weights(), _im2col_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _col2im_kernel(), _activationlayer_function(), _add_bias_kernel(),
      _original_weights(nullptr), _im2col_output(), _weights_reshaped(), _gemm_output(), _data_layout(DataLayout::NCHW), _append_bias(false), _skip_im2col(false), _skip_col2im(false), _is_quantized(false),
      _is_activationlayer_enabled(false), _is_prepared(false)
{
}

void CLGEMMConvolutionLayer::configure_mm(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
                                          int gemm_3d_depth)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
    ARM_COMPUTE_ERROR_THROW_ON(validate_mm(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), gemmlowp_output_stage, gemm_3d_depth, _skip_im2col));

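    // Note: when the im2col/col2im kernels are skipped (NHWC fast path), the GEMM below is told to
    // reinterpret its input as 3D and to emit a 3D output of depth gemm_3d_depth (= conv_h), so the
    // result can land directly in the convolution output without a separate reshape pass.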
    const GEMMInfo &gemm_info = GEMMInfo(false, false, true /* Reshape weights only for the first run */,
                                         gemm_3d_depth, _skip_im2col /* Reinterpret the input as 3D if im2col is skipped */,
                                         false, gemmlowp_output_stage);

    if(_is_quantized)
    {
        // Computing the convolution requires negated offsets, so temporarily swap in adjusted QuantizationInfo
        // Extract and negate input and weights offset
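        // With asymmetric quantization a stored value q represents scale * (q - offset), while the
        // low-precision GEMM core treats the offsets it reads from the tensors as values to add to
        // each entry. Negating them here therefore yields the (q - offset) terms the convolution
        // needs. Illustrative values: an input offset of 128 is passed as -128, so every uint8
        // entry q contributes (q - 128) to the accumulation.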
        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

        _mm_gemmlowp.configure(input, weights, biases, output, gemm_info);

        // Restore the original QuantizationInfo as input and weights could be used in other convolution layers
        input->info()->set_quantization_info(input_quantization_info);
        weights->info()->set_quantization_info(weights_quantization_info);
    }
    else
    {
        // Configure matrix multiply function
        _mm_gemm.configure(input, weights, nullptr, output, 1.0f, 0.0f, gemm_info);
    }
}

Status CLGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                           const GEMMLowpOutputStageInfo &gemmlowp_output_stage, int gemm_3d_depth, bool skip_im2col)
{
    const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());

    const GEMMInfo &gemm_info = GEMMInfo(false, false, true /* Reshape weights only for the first run */,
                                         gemm_3d_depth, skip_im2col /* Reinterpret the input as 3D if im2col is skipped */,
                                         false, gemmlowp_output_stage);

    if(is_quantized)
    {
        // Computing the convolution requires negated offsets, so validate against adjusted QuantizationInfo
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info   = input->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->quantization_info();

        std::unique_ptr<ITensorInfo> input_qa   = input->clone();
        std::unique_ptr<ITensorInfo> weights_qa = weights->clone();
        input_qa->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
        weights_qa->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

        // Perform validation step on GEMMLowp
        return CLGEMMLowpMatrixMultiplyCore::validate(input_qa.get(), weights_qa.get(), biases, output, gemm_info);
    }
    else
    {
        // Perform validation step on Matrix multiply function
        return CLGEMM::validate(input, weights, nullptr, output, 1.0f, 0.0f, gemm_info);
    }
}

void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
                                       const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

    ARM_COMPUTE_ERROR_THROW_ON(CLGEMMConvolutionLayer::validate(input->info(),
                                                                weights->info(),
                                                                biases != nullptr ? biases->info() : nullptr,
                                                                output->info(),
                                                                conv_info,
                                                                weights_info,
                                                                dilation,
                                                                act_info,
                                                                num_groups));

    const DataType   data_type   = input->info()->data_type();
    const DataLayout data_layout = input->info()->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->info()->dimension(idx_width);
    const unsigned int kernel_height = weights->info()->dimension(idx_height);

    _is_prepared      = weights_info.retain_internal_weights();
    _original_weights = weights;
    _is_quantized     = is_data_type_quantized_asymmetric(input->info()->data_type());
    _data_layout      = data_layout;
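    // im2col can be skipped only when every output pixel reads exactly one input pixel (1x1 kernel,
    // unit strides) in NHWC: each pixel's channel vector is then already a ready-made GEMM row.
    // col2im can be skipped for any NHWC convolution because the GEMM is configured (see
    // configure_mm) to write its result as a 3D tensor directly into the output.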
    _skip_im2col                = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
    _skip_col2im                = data_layout == DataLayout::NHWC;
    _append_bias                = (biases != nullptr) && (!_is_quantized);
    _is_activationlayer_enabled = act_info.enabled();

    // Set the GPU target for im2col and col2im
    _im2col_kernel.set_target(CLScheduler::get().target());
    _col2im_kernel.set_target(CLScheduler::get().target());

    const ICLTensor *gemm_input_to_use  = input;
    ICLTensor       *gemm_output_to_use = output;

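    // In the float path the bias is folded into the GEMM: im2col appends a constant 1 to every input
    // patch and the reshaped weights carry the bias values as a matching extra element per kernel.
    // When im2col is skipped a dedicated add-bias kernel runs instead, and in the quantized path the
    // biases are forwarded to CLGEMMLowpMatrixMultiplyCore (see configure_mm).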
    const ICLTensor *biases_to_use = (_append_bias && !_skip_im2col) ? biases : nullptr;

    // Get parameters from conv_info
    unsigned int stride_x = 0;
    unsigned int stride_y = 0;
    std::tie(stride_x, stride_y) = conv_info.stride();

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;
    std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(idx_width),
                                                 input->info()->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);

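    // One GEMM column per kernel: with grouped convolution each group covers only its
    // kernels / num_groups share of the output feature maps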
    unsigned int mat_weights_cols = weights->info()->dimension(idx_kernels) / num_groups;

    // _weights_reshaped will be auto configured in the kernel.
    // Just append biases and do not transpose 1xW as it will be reshaped in CLGEMM
    _reshape_weights.configure(weights, biases_to_use, &_weights_reshaped, num_groups);

    // Create tensor to store im2col reshaped inputs
    if(!_skip_im2col)
    {
        _memory_group.manage(&_im2col_output);

        // Configure and tune im2col. im2col output shape is auto-initialized
        _im2col_kernel.configure(input, &_im2col_output, Size2D(kernel_width, kernel_height), conv_info, _append_bias, dilation, num_groups);

        // Set quantization info
        _im2col_output.info()->set_quantization_info(input->info()->quantization_info());
        CLScheduler::get().tune_kernel_static(_im2col_kernel);

        // Update GEMM input
        gemm_input_to_use = &_im2col_output;
    }
    else if(_append_bias)
    {
        // Configure add bias kernel
        _add_bias_kernel.configure(output, biases, output, ConvertPolicy::SATURATE);
    }

    // Create GEMM output tensor
    if(!_skip_col2im)
    {
        TensorShape shape_gemm;

        // If we cannot skip col2im it means we run im2col as well
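        // The GEMM result has one column per kernel (output feature map) and one row per output
        // spatial location (conv_w * conv_h); col2im later rearranges this matrix into the
        // (W, H, C) output layout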
        shape_gemm = _im2col_output.info()->tensor_shape();
        shape_gemm.set(0, mat_weights_cols);
        shape_gemm.set(1, conv_w * conv_h);

        // FIXME: input->clone() doesn't work with subtensors for grouped convolutions.
        TensorInfo info_gemm(shape_gemm, 1, data_type);
        info_gemm.set_quantization_info(output->info()->quantization_info()).set_data_layout(input->info()->data_layout());
        _gemm_output.allocator()->init(info_gemm);
        _memory_group.manage(&_gemm_output);

        // Update GEMM output
        gemm_output_to_use = &_gemm_output;
    }

    GEMMLowpOutputStageInfo gemmlowp_output_stage;
    gemmlowp_output_stage.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    gemmlowp_output_stage.gemmlowp_offset     = 0;
    gemmlowp_output_stage.gemmlowp_multiplier = 0;
    gemmlowp_output_stage.gemmlowp_shift      = 0;

    // Configure output stage for quantized case
    if(_is_quantized)
    {
        const QuantizationInfo output_quant_info = (output->info()->total_size() == 0) ? input->info()->quantization_info() : output->info()->quantization_info();

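        // The requantization factor is input_scale * weights_scale / output_scale. It is expressed
        // in fixed point as a normalized int32 multiplier M0 in [0.5, 1) plus a right shift n, i.e.
        // multiplier = M0 * 2^-n. Illustrative arithmetic: 0.2 decomposes into M0 = 0.8 (stored as
        // round(0.8 * 2^31)) and n = 2, since 0.8 * 2^-2 = 0.2.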
        float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output_quant_info.scale;
        int   output_multiplier, output_shift;
        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);

        int min_activation = 0;
        int max_activation = 0;

        const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
                                                                                 };

        if(_is_activationlayer_enabled && supported_acts.count(act_info.activation()) != 0)
        {
            const int a_const_int = input->info()->quantization_info().quantize(act_info.a(), RoundingPolicy::TO_NEAREST_UP);
            const int b_const_int = input->info()->quantization_info().quantize(act_info.b(), RoundingPolicy::TO_NEAREST_UP);

            // Only LU_BOUNDED_RELU bounds from below with b; the other activations clamp at real zero (the offset)
            min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? input->info()->quantization_info().offset : b_const_int;
            max_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? 255 : a_const_int;

            // If the activation layer is RELU, BOUNDED_RELU or LU_BOUNDED_RELU, we can use the GEMMLowp output stage to perform this operation
            _is_activationlayer_enabled = false;
        }

        // Set the GEMMLowp output stage info
        gemmlowp_output_stage.gemmlowp_offset     = output_quant_info.offset;
        gemmlowp_output_stage.gemmlowp_multiplier = output_multiplier;
        gemmlowp_output_stage.gemmlowp_shift      = output_shift;
        gemmlowp_output_stage.gemmlowp_min_bound  = min_activation;
        gemmlowp_output_stage.gemmlowp_max_bound  = max_activation;
    }

    // Configure and tune GEMM
    configure_mm(gemm_input_to_use, &_weights_reshaped, biases, gemm_output_to_use, gemmlowp_output_stage, (data_layout == DataLayout::NHWC) ? conv_h : 1);

    if(!_skip_im2col)
    {
        _im2col_output.allocator()->allocate();
    }

    if(!_skip_col2im)
    {
        // Configure and tune Col2Im
        _col2im_kernel.configure(gemm_output_to_use, output, Size2D(conv_w, conv_h), num_groups);
        CLScheduler::get().tune_kernel_static(_col2im_kernel);

        _gemm_output.allocator()->allocate();
    }

    ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(idx_width) != conv_w) || (output->info()->dimension(idx_height) != conv_h),
                             "Output shape does not match the expected one");

    if(_is_activationlayer_enabled)
    {
        _activationlayer_function.configure(output, nullptr, act_info);
    }

    ARM_COMPUTE_UNUSED(weights_info);
}

Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                        const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!");
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1) && (input->data_layout() != DataLayout::NCHW), "Grouping (num_groups != 1) with NHWC data layout is not supported");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1) && (input->data_type() == DataType::QASYMM8), "Grouping (num_groups != 1) is not supported with QASYMM8");
    ARM_COMPUTE_RETURN_ERROR_ON(((input->dimension(2) / weights->dimension(2)) != num_groups) && (input->data_layout() == DataLayout::NCHW));

    const DataLayout data_layout = input->data_layout();
    const DataType   data_type   = input->data_type();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->dimension(idx_width);
    const unsigned int kernel_height = weights->dimension(idx_height);

    TensorInfo         im2col_reshaped_info, info_gemm, weights_reshaped_info;
    const ITensorInfo *gemm_input_to_use  = input;
    const ITensorInfo *gemm_output_to_use = output;
    const ITensorInfo *weights_to_use     = weights;

    const bool is_quantized               = is_data_type_quantized_asymmetric(data_type);
    const bool append_bias                = (biases != nullptr) && (!is_quantized);
    const bool skip_im2col                = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
    const bool skip_col2im                = data_layout == DataLayout::NHWC;
    bool       is_activationlayer_enabled = act_info.enabled();

    ARM_COMPUTE_RETURN_ERROR_ON((weights->dimension(idx_channel) * num_groups) != input->dimension(idx_channel));
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    // Validate biases
    if(biases != nullptr)
    {
        if(is_quantized)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        }
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ERROR_ON(act_info.b() > act_info.a());
    }

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;

    std::tie(conv_w, conv_h) = scaled_dimensions(input->dimension(idx_width),
                                                 input->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);

    unsigned int mat_weights_cols = weights->dimension(idx_kernels) / num_groups;

    // Validate weights reshape and build the reshaped weights info used by GEMM
    ARM_COMPUTE_RETURN_ON_ERROR(CLConvolutionLayerReshapeWeights::validate(weights, is_quantized ? nullptr : biases, nullptr, num_groups));
    weights_reshaped_info = TensorInfo(compute_weights_reshaped_shape(*weights, (append_bias && !skip_im2col), num_groups), 1, data_type);
    weights_to_use        = &weights_reshaped_info;

    if(!skip_im2col)
    {
        const Size2D kernel_dims(kernel_width, kernel_height);

        // Output tensor auto initialization if not yet initialized
        TensorShape expected_output_shape = compute_im2col_conv_shape(input, kernel_dims, conv_info, append_bias, dilation, num_groups == 1, num_groups);

        auto_init_if_empty(im2col_reshaped_info, input->clone()->set_tensor_shape(expected_output_shape));

        ARM_COMPUTE_RETURN_ON_ERROR(CLIm2ColKernel::validate(input, &im2col_reshaped_info, kernel_dims, conv_info, append_bias, dilation, num_groups));
        gemm_input_to_use = &im2col_reshaped_info;
    }
    else if(append_bias)
    {
        // Validate add bias kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticAdditionKernel::validate(output, biases, output, ConvertPolicy::SATURATE));
    }

    // Create GEMM output tensor
    if(!skip_col2im)
    {
        TensorShape shape_gemm;

        shape_gemm = gemm_input_to_use->tensor_shape();
        shape_gemm.set(0, mat_weights_cols);
        shape_gemm.set(1, conv_w * conv_h);

        info_gemm = TensorInfo(shape_gemm, 1, data_type);
        info_gemm.set_quantization_info(output->quantization_info()).set_data_layout(input->data_layout());
        gemm_output_to_use = &info_gemm;
    }

    GEMMLowpOutputStageInfo gemmlowp_output_stage;
    gemmlowp_output_stage.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    gemmlowp_output_stage.gemmlowp_offset     = 0;
    gemmlowp_output_stage.gemmlowp_multiplier = 0;
    gemmlowp_output_stage.gemmlowp_shift      = 0;

    if(is_quantized)
    {
        const QuantizationInfo output_quant_info = (output->total_size() == 0) ? input->quantization_info() : output->quantization_info();

        float multiplier = input->quantization_info().scale * weights->quantization_info().scale / output_quant_info.scale;
        int   output_multiplier, output_shift;
        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);

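        // The fused bounds below clamp the requantized accumulator: the lower bound defaults to the
        // quantized encoding of real 0 (the offset) unless the activation bounds from below, and 255
        // is the QASYMM8 type maximum used for plain RELU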
        int min_activation = 0;
        int max_activation = 0;

        const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
                                                                                 };

        if(is_activationlayer_enabled && supported_acts.count(act_info.activation()) != 0)
        {
            const int a_const_int = input->quantization_info().quantize(act_info.a(), RoundingPolicy::TO_NEAREST_UP);
            const int b_const_int = input->quantization_info().quantize(act_info.b(), RoundingPolicy::TO_NEAREST_UP);

            min_activation = b_const_int;
            max_activation = a_const_int;

            if(act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
            {
                min_activation = input->quantization_info().offset;
            }
            if(act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU)
            {
                max_activation = 255;
            }

            // If the activation layer is RELU, BOUNDED_RELU or LU_BOUNDED_RELU, we can use the GEMMLowp output stage to perform this operation
            is_activationlayer_enabled = false;

            // Set the GEMMLowp output stage info
            gemmlowp_output_stage.gemmlowp_offset     = output_quant_info.offset;
            gemmlowp_output_stage.gemmlowp_multiplier = output_multiplier;
            gemmlowp_output_stage.gemmlowp_shift      = output_shift;
            gemmlowp_output_stage.gemmlowp_min_bound  = min_activation;
            gemmlowp_output_stage.gemmlowp_max_bound  = max_activation;
        }
    }

    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemm_input_to_use, weights_to_use, biases, gemm_output_to_use, gemmlowp_output_stage, skip_col2im ? conv_h : 1, skip_im2col));

    // Validate Col2Im
    if(!skip_col2im)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLCol2ImKernel::validate(gemm_output_to_use, output, Size2D(conv_w, conv_h), num_groups));
    }

    // Validate Activation Layer
    if(is_activationlayer_enabled)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayer::validate(output, nullptr, act_info));
    }

    return Status{};
}

void CLGEMMConvolutionLayer::run()
{
    prepare();

    _memory_group.acquire();

    // Run im2col
    if(!_skip_im2col)
    {
        CLScheduler::get().enqueue(_im2col_kernel);
    }

    // Run CLGEMM or CLGEMMLowpMatrixMultiplyCore
    if(_is_quantized)
    {
        // Run gemmlowp
        _mm_gemmlowp.run();
    }
    else
    {
        // Run gemm
        _mm_gemm.run();
    }

    if(_skip_im2col && _append_bias)
    {
        CLScheduler::get().enqueue(_add_bias_kernel);
    }

    // Reshape output matrix
    if(!_skip_col2im)
    {
        CLScheduler::get().enqueue(_col2im_kernel, false);
    }

    // Run Activation Layer if enabled
    if(_is_activationlayer_enabled)
    {
        _activationlayer_function.run();
    }

    _memory_group.release();
}

void CLGEMMConvolutionLayer::prepare()
{
    if(!_is_prepared)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

        // Run weights reshaping and mark original weights tensor as unused
        _weights_reshaped.allocator()->allocate();
        _reshape_weights.run();
        _original_weights->mark_as_unused();

        // Prepare GEMM
        _is_quantized ? _mm_gemmlowp.prepare() : _mm_gemm.prepare();
        if(!_weights_reshaped.is_used())
        {
            _weights_reshaped.allocator()->free();
        }

        CLScheduler::get().queue().finish();
        _is_prepared = true;
    }
}
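
// --- Usage sketch (illustrative only: the shapes, fill code and scheduler setup below are
// assumptions for demonstration, not part of this file) ---
//
//   CLScheduler::get().default_init();
//
//   CLTensor src, weights, biases, dst;
//   src.allocator()->init(TensorInfo(TensorShape(224U, 224U, 3U), 1, DataType::F32));      // W, H, C
//   weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 16U), 1, DataType::F32)); // 3x3 kernel, 3 in, 16 out
//   biases.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));
//   dst.allocator()->init(TensorInfo(TensorShape(224U, 224U, 16U), 1, DataType::F32));
//
//   CLGEMMConvolutionLayer conv;
//   conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1 /* stride_x */, 1 /* stride_y */, 1 /* pad_x */, 1 /* pad_y */));
//
//   src.allocator()->allocate();
//   weights.allocator()->allocate();
//   biases.allocator()->allocate();
//   dst.allocator()->allocate();
//   // ... map the tensors and fill src/weights/biases ...
//
//   conv.run();                  // first run also triggers prepare(): weights are reshaped once
//   CLScheduler::get().sync();   // wait for the OpenCL queue before reading dst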