/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"

#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/Cast.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"

#include <cmath>
#include <memory>
#include <tuple>

namespace arm_compute
{
using namespace arm_compute::misc::shape_calculator;
using namespace arm_compute::utils::cast;

CLConvolutionLayerReshapeWeights::CLConvolutionLayerReshapeWeights()
    : _weights_reshape_kernel()
{
}

void CLConvolutionLayerReshapeWeights::configure(const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups)
{
    // Perform validation step
    ARM_COMPUTE_ERROR_ON_NULLPTR(weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLConvolutionLayerReshapeWeights::validate(weights->info(),
                                                                          (biases != nullptr) ? biases->info() : nullptr,
                                                                          output->info(),
                                                                          num_groups));

    const bool       append_biases = (biases != nullptr) && !is_data_type_quantized_asymmetric(weights->info()->data_type());
    const ICLTensor *biases_to_use = (append_biases) ? biases : nullptr;

    _weights_reshape_kernel.configure(weights, biases_to_use, output, num_groups);

    output->info()->set_quantization_info(weights->info()->quantization_info());
}

Status CLConvolutionLayerReshapeWeights::validate(const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, unsigned int num_groups)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(weights);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QASYMM8, DataType::QSYMM8_PER_CHANNEL, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    if(biases != nullptr)
    {
        const int idx_kernels = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::BATCHES);
        ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized(weights->data_type()));

        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    if((output != nullptr) && (output->total_size() != 0))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, output);
        ARM_COMPUTE_RETURN_ON_ERROR(CLWeightsReshapeKernel::validate(weights, biases, output, num_groups));
    }

    return Status{};
}

void CLConvolutionLayerReshapeWeights::run()
{
    CLScheduler::get().enqueue(_weights_reshape_kernel);
}

CLGEMMConvolutionLayer::CLGEMMConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
    : _memory_group(memory_manager), _weights_manager(weights_manager), _reshape_weights(), _reshape_weights_managed(), _im2col_kernel(), _mm_gemm(memory_manager, weights_manager),
      _mm_gemmlowp(memory_manager), _col2im_kernel(), _activationlayer_function(), _original_weights(nullptr), _im2col_output(), _weights_reshaped(), _gemm_output(), _skip_im2col(false),
      _skip_col2im(false), _is_quantized(false), _fuse_activation(true), _is_prepared(false)
{
}

void CLGEMMConvolutionLayer::configure_mm(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
                                          int gemm_3d_depth, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
    ARM_COMPUTE_ERROR_THROW_ON(validate_mm(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), gemmlowp_output_stage, gemm_3d_depth, _skip_im2col, act_info));

    const GEMMInfo &gemm_info = GEMMInfo(false,                 // is_a_reshaped
                                         false,                 // is_b_reshaped
                                         true,                  // reshape_b_only_on_first_run
                                         gemm_3d_depth,         // depth_output_gemm3d
                                         _skip_im2col,          // reinterpret_input_as_3d
                                         false,                 // retain_internal_weights
                                         gemmlowp_output_stage, // gemmlowp_output_stage
                                         false,                 // fp_mixed_precision
                                         true,                  // broadcast_bias
                                         act_info);             // activation_info

    if(_is_quantized)
    {
        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
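        // A short sketch of the arithmetic relied on here (standard asymmetric
        // quantization): real_value = scale * (quantized_value - offset). For example,
        // with scale = 0.5 and offset = 10, the quantized value 14 represents
        // 0.5 * (14 - 10) = 2.0. The GEMMLowp core consumes the stored offsets with the
        // opposite sign to what the convolution needs, hence the temporary negation
        // below (the original infos are restored after configuration).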
        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.uniform().scale, -input_quantization_info.uniform().offset));
        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));

        _mm_gemmlowp.configure(input, weights, biases, output, gemm_info);

        // Restore the original QuantizationInfo as input and weights could be used in other convolution layers
        input->info()->set_quantization_info(input_quantization_info);
        weights->info()->set_quantization_info(weights_quantization_info);
    }
    else
    {
        // Configure matrix multiply function
        _mm_gemm.configure(input, weights, biases, output, 1.0f, 1.0f, gemm_info);
    }
}

Status CLGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                           const GEMMLowpOutputStageInfo &gemmlowp_output_stage, int gemm_3d_depth, bool skip_im2col, const ActivationLayerInfo &act_info)
{
    const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());

    const GEMMInfo &gemm_info = GEMMInfo(false,                 // is_a_reshaped
                                         false,                 // is_b_reshaped
                                         true,                  // reshape_b_only_on_first_run
                                         gemm_3d_depth,         // depth_output_gemm3d
                                         skip_im2col,           // reinterpret_input_as_3d
                                         false,                 // retain_internal_weights
                                         gemmlowp_output_stage, // gemmlowp_output_stage
                                         false,                 // fp_mixed_precision
                                         true,                  // broadcast_bias
                                         act_info);             // activation_info

    if(is_quantized)
    {
        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info   = input->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->quantization_info();

        std::unique_ptr<ITensorInfo> input_qa   = input->clone();
        std::unique_ptr<ITensorInfo> weights_qa = weights->clone();
        input_qa->set_quantization_info(QuantizationInfo(input_quantization_info.uniform().scale, -input_quantization_info.uniform().offset));
        weights_qa->set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));

        // Perform validation step on GEMMLowp
        return CLGEMMLowpMatrixMultiplyCore::validate(input_qa.get(), weights_qa.get(), biases, output, gemm_info);
    }
    else
    {
        // Perform validation step on Matrix multiply function
        return CLGEMM::validate(input, weights, biases, output, 1.0f, 1.0f, gemm_info);
    }
}

void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
                                       const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

    ARM_COMPUTE_ERROR_THROW_ON(CLGEMMConvolutionLayer::validate(input->info(),
                                                                weights->info(),
                                                                biases != nullptr ? biases->info() : nullptr,
                                                                output->info(),
                                                                conv_info,
                                                                weights_info,
                                                                dilation,
                                                                act_info,
                                                                num_groups));

    const DataType   data_type   = input->info()->data_type();
    const DataLayout data_layout = input->info()->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->info()->dimension(idx_width);
    const unsigned int kernel_height = weights->info()->dimension(idx_height);
    const unsigned int num_kernels   = weights->info()->dimension(idx_kernels);

    const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
    const UniformQuantizationInfo oq_info = output->info()->quantization_info().uniform();

    _is_prepared      = weights_info.retain_internal_weights();
    _original_weights = weights;
    _is_quantized     = is_data_type_quantized_asymmetric(input->info()->data_type());
    _skip_im2col      = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
    _skip_col2im      = data_layout == DataLayout::NHWC;
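    // Rationale for the two flags above (as implied by their conditions): for NHWC the
    // GEMM result can be written out directly as the final tensor via GEMM3D, so col2im
    // is never needed; im2col can additionally be skipped for 1x1 stride-1 kernels,
    // where every input pixel already is its own receptive field and the input can feed
    // the GEMM directly.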

    // Only in the quantized case are there a few activation functions that cannot be fused in GEMM
    _fuse_activation = true;

    // Set the GPU target for im2col and col2im
    _im2col_kernel.set_target(CLScheduler::get().target());
    _col2im_kernel.set_target(CLScheduler::get().target());

    const ICLTensor *gemm_input_to_use  = input;
    ICLTensor       *gemm_output_to_use = output;

    // Get parameters from conv_info
    unsigned int stride_x = 0;
    unsigned int stride_y = 0;
    std::tie(stride_x, stride_y) = conv_info.stride();

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;
    std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(idx_width),
                                                 input->info()->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);

    unsigned int mat_weights_cols = num_kernels / num_groups;

    const ICLTensor *biases_to_use = biases;
    bool             append_bias   = false;

    ICLTensor *weights_to_use = &_weights_reshaped;
    if(num_groups != 1 && biases != nullptr)
    {
        // num_groups != 1 can only be for NCHW
        // Since there is no utility function to reshape the biases, we append the biases to the weights tensor
        biases_to_use = nullptr;
        append_bias   = true;

        if(_weights_manager && _weights_manager->are_weights_managed(weights))
        {
            _reshape_weights_managed.configure(weights, biases, num_groups);
            weights_to_use = utils::cast::polymorphic_downcast<ICLTensor *>(_weights_manager->acquire(weights, &_reshape_weights_managed));
        }
        else
        {
            _reshape_weights.configure(weights, biases, &_weights_reshaped, num_groups);
        }
    }
    else
    {
        if(_weights_manager && _weights_manager->are_weights_managed(weights))
        {
            _reshape_weights_managed.configure(weights, nullptr, num_groups);
            weights_to_use = utils::cast::polymorphic_downcast<ICLTensor *>(_weights_manager->acquire(weights, &_reshape_weights_managed));
        }
        else
        {
            _reshape_weights.configure(weights, nullptr, &_weights_reshaped, num_groups);
        }
    }

    // Create tensor to store im2col reshaped inputs
    if(!_skip_im2col)
    {
        _memory_group.manage(&_im2col_output);

        // Configure and tune im2col. im2col output shape is auto-initialized
        _im2col_kernel.configure(input, &_im2col_output, Size2D(kernel_width, kernel_height), conv_info, append_bias, dilation, num_groups);

        // Set quantization info
        _im2col_output.info()->set_quantization_info(input->info()->quantization_info());
        CLScheduler::get().tune_kernel_static(_im2col_kernel);

        // Update GEMM input
        gemm_input_to_use = &_im2col_output;
    }

    // Create GEMM output tensor
    if(!_skip_col2im)
    {
        TensorShape shape_gemm;

        // If we cannot skip col2im it means we run im2col as well
        shape_gemm = _im2col_output.info()->tensor_shape();
        shape_gemm.set(0, mat_weights_cols);
        shape_gemm.set(1, conv_w * conv_h);
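        // Resulting layout sketch: dimension 0 holds the per-group output channels and
        // dimension 1 one entry per output pixel, i.e. a (mat_weights_cols) x (conv_w * conv_h)
        // matrix that col2im later scatters back into the spatial output tensor.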

        // TODO(COMPMID-2078): input->clone() doesn't work with subtensors for grouped convolutions.
        TensorInfo info_gemm(shape_gemm, 1, data_type);
        info_gemm.set_quantization_info(output->info()->quantization_info()).set_data_layout(input->info()->data_layout());
        _gemm_output.allocator()->init(info_gemm);
        _memory_group.manage(&_gemm_output);

        // Update GEMM output
        gemm_output_to_use = &_gemm_output;
    }

    GEMMLowpOutputStageInfo gemmlowp_output_stage;
    gemmlowp_output_stage.type            = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    gemmlowp_output_stage.gemmlowp_offset = 0;

    // Configure output stage for quantized case
    if(_is_quantized)
    {
        const auto         output_quant_info        = (output->info()->total_size() == 0) ? iq_info : oq_info;
        const bool         is_quantized_per_channel = is_data_type_quantized_per_channel(weights->info()->data_type());
        const unsigned int num_filters              = (is_quantized_per_channel) ? num_kernels : 1;

        gemmlowp_output_stage.is_quantized_per_channel = is_quantized_per_channel;

        gemmlowp_output_stage.gemmlowp_multipliers.resize(num_filters);
        gemmlowp_output_stage.gemmlowp_shifts.resize(num_filters);
        quantization::compute_quantized_multipliers_and_shifts(input->info(),
                                                               weights->info(),
                                                               output->info(),
                                                               idx_kernels,
                                                               gemmlowp_output_stage.gemmlowp_multipliers.data(),
                                                               gemmlowp_output_stage.gemmlowp_shifts.data());
        gemmlowp_output_stage.gemmlowp_multiplier = gemmlowp_output_stage.gemmlowp_multipliers[0];
        gemmlowp_output_stage.gemmlowp_shift      = gemmlowp_output_stage.gemmlowp_shifts[0];
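        // The QUANTIZE_DOWN_FIXEDPOINT stage approximates the float rescale factor
        // (input_scale * weights_scale / output_scale) as a fixed-point multiplier plus
        // a right shift; compute_quantized_multipliers_and_shifts() above fills one
        // multiplier/shift pair per filter (or a single pair for per-tensor quantization).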

        int min_activation = 0;
        int max_activation = 0;

        const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
                                                                                 };

        if(act_info.enabled())
        {
            if(supported_acts.count(act_info.activation()) != 0)
            {
                const int a_const_int = quantize_qasymm8(act_info.a(), output_quant_info);
                const int b_const_int = quantize_qasymm8(act_info.b(), output_quant_info);

                min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? output_quant_info.offset : b_const_int;
                max_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? 255 : a_const_int;
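                // For example, for QASYMM8 RELU the bounds become [offset, 255]: from the
                // quantized value of real 0 up to the type maximum, while the bounded
                // variants clamp the range with the quantized a/b constants instead.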
            }
            else
            {
                _fuse_activation = false;
            }
        }

        // Set the GEMMLowp output stage info
        gemmlowp_output_stage.gemmlowp_offset    = output_quant_info.offset;
        gemmlowp_output_stage.gemmlowp_min_bound = min_activation;
        gemmlowp_output_stage.gemmlowp_max_bound = max_activation;
    }

    // Configure and tune GEMM
    // In case of NHWC, we need to run GEMM3D (gemm_3d_depth != 0) in order to avoid reshaping the output matrix
    const unsigned int gemm_3d_depth = (data_layout == DataLayout::NHWC) ? conv_h : 0;
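    // Shape sketch: with gemm_3d_depth = conv_h the 2D GEMM result of size
    // (num_kernels, conv_w * conv_h) is written out directly as a
    // (num_kernels, conv_w, conv_h) volume, matching the NHWC output without a
    // separate col2im pass.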

    configure_mm(gemm_input_to_use, weights_to_use, biases_to_use, gemm_output_to_use, gemmlowp_output_stage, gemm_3d_depth, act_info);

    if(!_skip_im2col)
    {
        _im2col_output.allocator()->allocate();
    }

    if(!_skip_col2im)
    {
        // Configure and tune Col2Im
        _col2im_kernel.configure(gemm_output_to_use, output, Size2D(conv_w, conv_h), num_groups);
        CLScheduler::get().tune_kernel_static(_col2im_kernel);

        _gemm_output.allocator()->allocate();
    }

    ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(idx_width) != conv_w) || (output->info()->dimension(idx_height) != conv_h),
                             "Output shape does not match the expected one");

    if(!_fuse_activation)
    {
        _activationlayer_function.configure(output, nullptr, act_info);
    }

    ARM_COMPUTE_UNUSED(weights_info);
}

Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                        const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!");
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QSYMM8_PER_CHANNEL, DataType::F16, DataType::F32);
    const bool is_quantized_per_channel = is_data_type_quantized_per_channel(weights->data_type());

    if(is_quantized_per_channel)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() != DataType::QASYMM8, "Input data type not compatible with Weights");
    }
    else
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
    }
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1) && (input->data_layout() != DataLayout::NCHW), "Grouping (num_groups != 1) with NHWC data layout is not supported");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1) && (input->data_type() == DataType::QASYMM8), "Grouping (num_groups != 1) is not supported with QASYMM8");
    ARM_COMPUTE_RETURN_ERROR_ON(((input->dimension(2) / weights->dimension(2)) != num_groups) && (input->data_layout() == DataLayout::NCHW));

    const DataLayout data_layout = input->data_layout();
    const DataType   data_type   = input->data_type();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->dimension(idx_width);
    const unsigned int kernel_height = weights->dimension(idx_height);
    const unsigned int num_kernels   = weights->dimension(idx_kernels);

    TensorInfo         im2col_reshaped_info{};
    TensorInfo         info_gemm{};
    TensorInfo         weights_reshaped_info{};
    const ITensorInfo *gemm_input_to_use  = input;
    const ITensorInfo *gemm_output_to_use = output;
    const ITensorInfo *weights_to_use     = weights;
    const bool         is_quantized       = is_data_type_quantized_asymmetric(data_type);
    const bool         skip_im2col        = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
    const bool         skip_col2im        = data_layout == DataLayout::NHWC;
    bool               fuse_activation    = true;

    ARM_COMPUTE_RETURN_ERROR_ON((weights->dimension(idx_channel) * num_groups) != input->dimension(idx_channel));
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    // Validate biases
    if(biases != nullptr)
    {
        if(is_quantized)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        }
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ERROR_ON(act_info.b() > act_info.a());
    }

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;

    std::tie(conv_w, conv_h) = scaled_dimensions(input->dimension(idx_width),
                                                 input->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);

    unsigned int mat_weights_cols = num_kernels / num_groups;

    const ITensorInfo *biases_to_use = biases;
    bool               append_bias   = false;

    if(num_groups != 1 && biases != nullptr)
    {
        // num_groups != 1 can only be for NCHW
        // Since there is no utility function to reshape the biases, we append the biases to the weights tensor
        biases_to_use = nullptr;
        append_bias   = true;

        ARM_COMPUTE_RETURN_ON_ERROR(CLConvolutionLayerReshapeWeights::validate(weights, biases, nullptr, num_groups));
        weights_reshaped_info = TensorInfo(compute_weights_reshaped_shape(*weights, true, num_groups), 1, data_type);
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLConvolutionLayerReshapeWeights::validate(weights, nullptr, nullptr, num_groups));
        weights_reshaped_info = TensorInfo(compute_weights_reshaped_shape(*weights, false, num_groups), 1, data_type);
    }

    weights_to_use = &weights_reshaped_info;

    if(!skip_im2col)
    {
        const Size2D kernel_dims(kernel_width, kernel_height);

        // Output tensor auto initialization if not yet initialized
        TensorShape expected_output_shape = compute_im2col_conv_shape(input, kernel_dims, conv_info, append_bias, dilation, num_groups == 1, num_groups);

        auto_init_if_empty(im2col_reshaped_info, input->clone()->set_tensor_shape(expected_output_shape));

        ARM_COMPUTE_RETURN_ON_ERROR(CLIm2ColKernel::validate(input, &im2col_reshaped_info, kernel_dims, conv_info, append_bias, dilation, num_groups));
        gemm_input_to_use = &im2col_reshaped_info;
    }

    // Create GEMM output tensor
    if(!skip_col2im)
    {
        TensorShape shape_gemm;

        shape_gemm = gemm_input_to_use->tensor_shape();
        shape_gemm.set(0, mat_weights_cols);
        shape_gemm.set(1, conv_w * conv_h);

        info_gemm = TensorInfo(shape_gemm, 1, data_type);
        info_gemm.set_quantization_info(output->quantization_info()).set_data_layout(input->data_layout());
        gemm_output_to_use = &info_gemm;
    }

    GEMMLowpOutputStageInfo gemmlowp_output_stage;
    gemmlowp_output_stage.type                     = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    gemmlowp_output_stage.gemmlowp_offset          = 0;
    gemmlowp_output_stage.is_quantized_per_channel = is_quantized_per_channel;

    if(is_quantized)
    {
        const UniformQuantizationInfo iq_info = input->quantization_info().uniform();
        const UniformQuantizationInfo oq_info = output->quantization_info().uniform();
        const auto                    output_quant_info = (output->total_size() == 0) ? iq_info : oq_info;
        const unsigned int            num_filters       = (is_quantized_per_channel) ? num_kernels : 1;

        gemmlowp_output_stage.gemmlowp_multipliers.resize(num_filters);
        gemmlowp_output_stage.gemmlowp_shifts.resize(num_filters);
        quantization::compute_quantized_multipliers_and_shifts(input,
                                                               weights,
                                                               output,
                                                               idx_kernels,
                                                               gemmlowp_output_stage.gemmlowp_multipliers.data(),
                                                               gemmlowp_output_stage.gemmlowp_shifts.data());
        gemmlowp_output_stage.gemmlowp_multiplier = gemmlowp_output_stage.gemmlowp_multipliers[0];
        gemmlowp_output_stage.gemmlowp_shift      = gemmlowp_output_stage.gemmlowp_shifts[0];

        int min_activation = 0;
        int max_activation = 0;

        const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
                                                                                 };

        if(act_info.enabled())
        {
            if(supported_acts.count(act_info.activation()) != 0)
            {
                const int a_const_int = quantize_qasymm8(act_info.a(), output_quant_info);
                const int b_const_int = quantize_qasymm8(act_info.b(), output_quant_info);

                min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? output_quant_info.offset : b_const_int;
                max_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? 255 : a_const_int;
            }
            else
            {
                fuse_activation = false;
            }
        }

        // Set the GEMMLowp output stage info
        gemmlowp_output_stage.gemmlowp_offset    = output_quant_info.offset;
        gemmlowp_output_stage.gemmlowp_min_bound = min_activation;
        gemmlowp_output_stage.gemmlowp_max_bound = max_activation;
    }

    // In case of NHWC, we need to run GEMM3D (gemm_3d_depth != 0) in order to avoid reshaping the output matrix
    const unsigned int gemm_3d_depth = (data_layout == DataLayout::NHWC) ? conv_h : 0;

    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemm_input_to_use, weights_to_use, biases_to_use, gemm_output_to_use, gemmlowp_output_stage, gemm_3d_depth, skip_im2col, act_info));

    // Validate Col2Im
    if(!skip_col2im)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLCol2ImKernel::validate(gemm_output_to_use, output, Size2D(conv_w, conv_h), num_groups));
    }

    // Validate Activation Layer
    if(!fuse_activation)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayer::validate(output, nullptr, act_info));
    }

    return Status{};
}

void CLGEMMConvolutionLayer::run()
{
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);
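    // Note: MemoryGroupResourceScope is an RAII helper; it acquires the backing memory
    // of every tensor managed by _memory_group for the duration of run() and releases
    // it again on scope exit.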

    // Run im2col
    if(!_skip_im2col)
    {
        CLScheduler::get().enqueue(_im2col_kernel);
    }

    // Run CLGEMM or CLGEMMLowpMatrixMultiplyCore
    if(_is_quantized)
    {
        // Run gemmlowp
        _mm_gemmlowp.run();
    }
    else
    {
        // Run gemm
        _mm_gemm.run();
    }

    // Reshape output matrix
    if(!_skip_col2im)
    {
        CLScheduler::get().enqueue(_col2im_kernel, false);
    }

    // Run Activation Layer if we cannot fuse it in GEMM
    if(!_fuse_activation)
    {
        _activationlayer_function.run();
    }
}

void CLGEMMConvolutionLayer::prepare()
{
    if(!_is_prepared)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());
        if(_weights_manager && _weights_manager->are_weights_managed(_original_weights))
        {
            _weights_manager->run(_original_weights, &_reshape_weights_managed);
        }
        else
        {
            // Run weights reshaping and mark original weights tensor as unused
            _weights_reshaped.allocator()->allocate();
            _reshape_weights.run();
            _original_weights->mark_as_unused();
        }

        // Prepare GEMM
        _is_quantized ? _mm_gemmlowp.prepare() : _mm_gemm.prepare();
        if(!_weights_reshaped.is_used())
        {
            _weights_reshaped.allocator()->free();
        }

        CLScheduler::get().queue().finish();
        _is_prepared = true;
    }
}
} // namespace arm_compute