/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"

#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"

#include <cmath>
#include <memory>
#include <tuple>

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;

CLConvolutionLayerReshapeWeights::CLConvolutionLayerReshapeWeights()
    : _weights_reshape_kernel()
{
}

void CLConvolutionLayerReshapeWeights::configure(const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output)
{
    // Perform validation step
    ARM_COMPUTE_ERROR_ON_NULLPTR(weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLConvolutionLayerReshapeWeights::validate(weights->info(),
                                                                          (biases != nullptr) ? biases->info() : nullptr,
                                                                          output->info()));

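    // Fold the bias into the reshaped weights only for non-quantized types; for
    // quantized asymmetric data the bias is applied later, in the GEMMLowp output stage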
    const bool       append_biases = (biases != nullptr) && !is_data_type_quantized_asymmetric(weights->info()->data_type());
    const ICLTensor *biases_to_use = (append_biases) ? biases : nullptr;

    _weights_reshape_kernel.configure(weights, biases_to_use, output);

    output->info()->set_quantization_info(weights->info()->quantization_info());
}

Status CLConvolutionLayerReshapeWeights::validate(const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(weights);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QS8, DataType::QASYMM8, DataType::QS16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    if(biases != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_asymmetric(weights->data_type()));
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(3));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    if((output != nullptr) && (output->total_size() != 0))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(weights, output);

        // Propagate the kernel's validation status instead of silently discarding it
        ARM_COMPUTE_RETURN_ON_ERROR(CLWeightsReshapeKernel::validate(weights, biases, output));
    }

    return Status{};
}

void CLConvolutionLayerReshapeWeights::run()
{
    CLScheduler::get().enqueue(_weights_reshape_kernel);
}

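// Illustrative usage of CLGEMMConvolutionLayer (a sketch only; assumes the CL tensors
// are initialised and allocated elsewhere, and that the trailing weights_info/dilation/
// act_info parameters of configure() take their default values):
//
//   CLGEMMConvolutionLayer conv;
//   conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 0, 0));
//   conv.run(); // the first run() triggers prepare(), which reshapes the weights once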
CLGEMMConvolutionLayer::CLGEMMConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(memory_manager), _reshape_weights(), _im2col_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(), _col2im_kernel(), _activationlayer_function(),
      _original_weights(nullptr), _im2col_output(), _weights_reshaped(), _gemm_output(), _tmp_output(), _is_quantized(false), _is_activationlayer_enabled(false), _is_prepared(false)
{
}

void CLGEMMConvolutionLayer::configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
    ARM_COMPUTE_ERROR_THROW_ON(validate_mm(input->info(), weights->info(), output->info()));

    if(_is_quantized)
    {
        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

        _mm_gemmlowp.configure(input, weights, output, GEMMInfo(false, false, true /* Reshape weights only for the first run */));

        // Restore the original QuantizationInfo as input and weights could be used in other convolution layers
        input->info()->set_quantization_info(input_quantization_info);
        weights->info()->set_quantization_info(weights_quantization_info);
    }
    else
    {
        // Configure matrix multiply function (alpha = 1, beta = 0: plain product, no accumulation into the output)
        _mm_gemm.configure(input, weights, nullptr, output, 1.0f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */));
    }
}

Status CLGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output)
{
    const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());

    const GEMMInfo &gemm_info = GEMMInfo(false, false, true /* Reshape weights only for the first run */);
    if(is_quantized)
    {
        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info   = input->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->quantization_info();

        std::unique_ptr<ITensorInfo> input_qa   = input->clone();
        std::unique_ptr<ITensorInfo> weights_qa = weights->clone();
        input_qa->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
        weights_qa->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

        // Perform validation step on GEMMLowp and propagate its status
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyCore::validate(input_qa.get(), weights_qa.get(), output, gemm_info));
    }
    else
    {
        // Perform validation step on the matrix multiply function and propagate its status
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(input, weights, nullptr, output, 1.0f, 0.0f, gemm_info));
    }
    return Status{};
}

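// GEMM-based convolution: im2col unrolls each filter window of the input into a matrix
// row, the reshaped weights supply the columns, a single matrix multiply computes every
// output pixel at once, and col2im folds the result back into the output tensor layout.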
void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
                                       const Size2D &dilation, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

    ARM_COMPUTE_ERROR_THROW_ON(CLGEMMConvolutionLayer::validate(input->info(),
                                                                weights->info(),
                                                                biases != nullptr ? biases->info() : nullptr,
                                                                output->info(),
                                                                conv_info,
                                                                weights_info,
                                                                dilation,
                                                                act_info));

    _is_prepared      = false;
    _original_weights = weights;
    _is_quantized     = is_data_type_quantized_asymmetric(input->info()->data_type());

    const DataType dt = input->info()->data_type();

    // Set the GPU target for im2col and col2im
    _im2col_kernel.set_target(CLScheduler::get().target());
    _col2im_kernel.set_target(CLScheduler::get().target());

    const bool append_bias = (biases != nullptr) && (!_is_quantized);

    const unsigned   bias_element  = (append_bias) ? 1 : 0;
    const ICLTensor *biases_to_use = (append_bias) ? biases : nullptr;

    // Get parameters from conv_info
    unsigned int stride_x = 0;
    unsigned int stride_y = 0;
    std::tie(stride_x, stride_y) = conv_info.stride();

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;

    const unsigned int kernel_width  = weights->info()->dimension(0);
    const unsigned int kernel_height = weights->info()->dimension(1);
    std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(0), input->info()->dimension(1), kernel_width, kernel_height,
                                                 conv_info, dilation);

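    // GEMM problem size: one column per output feature map, one row per unrolled filter
    // window (kernel_w * kernel_h * input_channels, plus one element when the bias is appended)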
    unsigned int mat_weights_cols = weights->info()->dimension(3);
    unsigned int mat_weights_rows = weights->info()->dimension(0) * weights->info()->dimension(1) * weights->info()->dimension(2) + bias_element;

    // _weights_reshaped will be auto configured in the kernel.
    // Just append biases and do not transpose 1xW as it will be reshaped in CLGEMM
    _reshape_weights.configure(weights, biases_to_use, &_weights_reshaped);

    weights = &_weights_reshaped;

    // Create tensor to store im2col reshaped inputs
    const unsigned int mat_input_cols = mat_weights_rows;
    const unsigned int mat_input_rows = conv_w * conv_h;
    TensorShape        shape_im2col   = input->info()->tensor_shape();
    shape_im2col.set(0, mat_input_cols);
    shape_im2col.set(1, mat_input_rows);
    shape_im2col.set(2, 1);
    // FIXME: input->clone() doesn't work with subtensors for grouped convolutions.
    TensorInfo im2col_reshaped_info(shape_im2col, 1, dt, input->info()->fixed_point_position());
    im2col_reshaped_info.set_quantization_info(input->info()->quantization_info());
    _im2col_output.allocator()->init(im2col_reshaped_info);
    _memory_group.manage(&_im2col_output);

    // Create GEMM output tensor
    TensorShape shape_gemm = _im2col_output.info()->tensor_shape();
    shape_gemm.set(0, mat_weights_cols);
    shape_gemm.set(1, mat_input_rows);
    const DataType gemm_data_type = _is_quantized ? DataType::S32 : dt;
    // For quantized asymmetric input the GEMM output is S32, so the raw integer accumulators reach the output stage without premature quantization.
    // FIXME: input->clone() doesn't work with subtensors for grouped convolutions.
    TensorInfo info_gemm(shape_gemm, 1, gemm_data_type, input->info()->fixed_point_position());
    info_gemm.set_quantization_info(output->info()->quantization_info());
    _gemm_output.allocator()->init(info_gemm);
    _memory_group.manage(&_gemm_output);

    // Configure im2col
    _im2col_kernel.configure(input, &_im2col_output, Size2D(kernel_width, kernel_height), conv_info, append_bias, dilation);

    // Configure GEMM
    configure_mm(&_im2col_output, weights, &_gemm_output);

    _im2col_output.allocator()->allocate();

    // Configure output stage for quantized case
    if(_is_quantized)
    {
        const QuantizationInfo output_quant_info = (output->info()->total_size() == 0) ? input->info()->quantization_info() : output->info()->quantization_info();

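        // Requantization: the S32 accumulators are scaled by multiplier = (input_scale * weights_scale) / output_scale,
        // encoded as a normalized fixed-point multiplier plus a right shift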
        float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output_quant_info.scale;
        int   output_multiplier, output_shift;
        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
        _memory_group.manage(&_tmp_output);
        _gemmlowp_output_stage.configure(&_gemm_output, biases, &_tmp_output, output_multiplier, output_shift, output_quant_info.offset);
    }

    // Configure Col2Im
    _col2im_kernel.configure(_is_quantized ? &_tmp_output : &_gemm_output, output, std::make_pair(conv_w, conv_h));
    if(_is_quantized)
    {
        _tmp_output.allocator()->allocate();
    }
    _gemm_output.allocator()->allocate();

    ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(0) != conv_w) || (output->info()->dimension(1) != conv_h), "Output shape does not match the expected one");

    // Configure Activation Layer
    _is_activationlayer_enabled = act_info.enabled();

    if(_is_activationlayer_enabled)
    {
        _activationlayer_function.configure(output, nullptr, act_info);
    }

    ARM_COMPUTE_UNUSED(weights_info);
}

Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                        const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!");
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QASYMM8, DataType::QS16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, weights);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(2) != input->dimension(2));
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    if(act_info.enabled())
    {
        // Return an error rather than aborting: validate() must not throw
        ARM_COMPUTE_RETURN_ERROR_ON(act_info.b() > act_info.a());
    }

    const bool     is_quantized = is_data_type_quantized_asymmetric(input->data_type());
    const bool     append_bias  = (biases != nullptr) && (!is_quantized);
    const unsigned bias_element = (append_bias) ? 1 : 0;
    const DataType dt           = input->data_type();

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;

    const unsigned int kernel_width  = weights->dimension(0);
    const unsigned int kernel_height = weights->dimension(1);

    std::tie(conv_w, conv_h) = scaled_dimensions(input->dimension(0), input->dimension(1), kernel_width, kernel_height, conv_info, dilation);

    unsigned int mat_weights_cols = weights->dimension(3);
    unsigned int mat_weights_rows = weights->dimension(0) * weights->dimension(1) * weights->dimension(2) + bias_element;

    ARM_COMPUTE_RETURN_ON_ERROR(CLConvolutionLayerReshapeWeights::validate(weights, is_quantized ? nullptr : biases, nullptr));

    // Create tensor info for im2col reshaped inputs
    const unsigned int mat_input_cols = mat_weights_rows;
    const unsigned int mat_input_rows = conv_w * conv_h;
    TensorShape        shape_im2col   = input->tensor_shape();
    shape_im2col.set(0, mat_input_cols);
    shape_im2col.set(1, mat_input_rows);
    shape_im2col.set(2, 1);
    TensorInfo im2col_reshaped_info(shape_im2col, 1, dt, input->fixed_point_position());
    im2col_reshaped_info.set_quantization_info(input->quantization_info());
    ARM_COMPUTE_RETURN_ON_ERROR(CLIm2ColKernel::validate(input, &im2col_reshaped_info, Size2D(kernel_width, kernel_height), conv_info, append_bias, dilation));

    // Create GEMM output tensor
    TensorShape shape_gemm = im2col_reshaped_info.tensor_shape();
    shape_gemm.set(0, mat_weights_cols);
    shape_gemm.set(1, mat_input_rows);
    const DataType gemm_data_type = is_quantized ? DataType::S32 : dt;
    // For quantized asymmetric input the GEMM output is S32, so the raw integer accumulators reach the output stage without premature quantization.
    TensorInfo info_gemm(shape_gemm, 1, gemm_data_type, input->fixed_point_position());
    info_gemm.set_quantization_info(output->quantization_info());

    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(&im2col_reshaped_info, weights, &info_gemm));
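    // tmp_info models the QASYMM8 tensor that the GEMMLowp output stage produces and col2im consumes in the quantized path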
    TensorInfo tmp_info(shape_gemm, 1, DataType::QASYMM8, input->fixed_point_position());
    tmp_info.set_quantization_info(output->quantization_info());

    if(is_quantized)
    {
        float multiplier = input->quantization_info().scale * weights->quantization_info().scale / output->quantization_info().scale;
        int   output_multiplier, output_shift;
        ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift));
        // Validate output stage for quantized case and propagate its status
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(&info_gemm, biases, &tmp_info, output->quantization_info().offset));
    }

    // Validate Col2Im
    ARM_COMPUTE_RETURN_ON_ERROR(CLCol2ImKernel::validate(is_quantized ? &tmp_info : &info_gemm, output, std::make_pair(conv_w, conv_h)));

    if(biases != nullptr)
    {
        if(is_quantized)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        }
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, biases);
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(3));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    // Validate Activation Layer
    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayer::validate(output, nullptr, act_info));
    }

    return Status{};
}

void CLGEMMConvolutionLayer::run()
{
    prepare();

    _memory_group.acquire();

    // Run im2col
    CLScheduler::get().enqueue(_im2col_kernel);

    // Run CLGEMM or CLGEMMLowpMatrixMultiplyCore
    if(_is_quantized)
    {
        // Run gemmlowp
        _mm_gemmlowp.run();

        // Run output stage
        _gemmlowp_output_stage.run();
    }
    else
    {
        // Run gemm
        _mm_gemm.run();
    }

    // Reshape output matrix
    CLScheduler::get().enqueue(_col2im_kernel, false);

    // Run Activation Layer if enabled
    if(_is_activationlayer_enabled)
    {
        _activationlayer_function.run();
    }

    _memory_group.release();
}

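// prepare() performs the one-off weight reshaping; run() invokes it on first execution,
// after which the original weights tensor is marked unused and may be released by the caller.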
void CLGEMMConvolutionLayer::prepare()
{
    if(!_is_prepared)
    {
        // Run weights reshaping and mark the original weights tensor as unused
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());
        _weights_reshaped.allocator()->allocate();
        _reshape_weights.run();
        _original_weights->mark_as_unused();

        // Run GEMM prepare
        if(!_is_quantized)
        {
            _mm_gemm.prepare();
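            // If CLGEMM retained its own transformed copy of the weights, the intermediate
            // reshaped weights are no longer referenced and can be freed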
            if(!_weights_reshaped.is_used())
            {
                _weights_reshaped.allocator()->free();
            }
        }

        CLScheduler::get().queue().finish();
        _is_prepared = true;
    }
}