/*
 * Copyright (c) 2017-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"

#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/Cast.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"

#include <cmath>
#include <memory>
#include <tuple>

namespace arm_compute
{
using namespace arm_compute::misc::shape_calculator;
using namespace arm_compute::utils::cast;

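// CLConvolutionLayerReshapeWeights rearranges the convolution weights into the 2D matrix
// consumed by GEMM; when requested, the bias values are appended to the reshaped weights.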
CLConvolutionLayerReshapeWeights::CLConvolutionLayerReshapeWeights()
    : _weights_reshape_kernel()
{
}

void CLConvolutionLayerReshapeWeights::configure(const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups)
{
    configure(CLKernelLibrary::get().get_compile_context(), weights, biases, output, num_groups);
}

void CLConvolutionLayerReshapeWeights::configure(const CLCompileContext &compile_context, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups)
{
    // Perform validation step
    ARM_COMPUTE_ERROR_ON_NULLPTR(weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLConvolutionLayerReshapeWeights::validate(weights->info(),
                                                                          (biases != nullptr) ? biases->info() : nullptr,
                                                                          output->info(),
                                                                          num_groups));

    const bool       append_biases = (biases != nullptr) && !is_data_type_quantized_asymmetric(weights->info()->data_type());
    const ICLTensor *biases_to_use = (append_biases) ? biases : nullptr;

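    // For float types the bias is folded into the reshaped weights; for quantized
    // asymmetric types it is applied later, in the GEMMLowp output stage.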
    _weights_reshape_kernel.configure(compile_context, weights, biases_to_use, output, num_groups);

    output->info()->set_quantization_info(weights->info()->quantization_info());
}

Status CLConvolutionLayerReshapeWeights::validate(const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, unsigned int num_groups)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(weights);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    if(biases != nullptr)
    {
        const int idx_kernels = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::BATCHES);
        ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized(weights->data_type()));

        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    if((output != nullptr) && (output->total_size() != 0))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, output);
        // Propagate the kernel's validation status instead of discarding it
        ARM_COMPUTE_RETURN_ON_ERROR(CLWeightsReshapeKernel::validate(weights, biases, output, num_groups));
    }

    return Status{};
}

void CLConvolutionLayerReshapeWeights::run()
{
    CLScheduler::get().enqueue(_weights_reshape_kernel);
}

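// Typical usage of the layer below (a minimal sketch; the tensor names, shapes and
// allocation calls are illustrative, not taken from this file):
//
//   CLGEMMConvolutionLayer conv;
//   conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 0, 0));
//   // ... allocate src/weights/biases/dst ...
//   conv.run(); // the first run() calls prepare(), which reshapes the weights once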
CLGEMMConvolutionLayer::CLGEMMConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
    : _memory_group(memory_manager), _weights_manager(weights_manager), _reshape_weights(), _reshape_weights_managed(), _im2col_kernel(), _mm_gemm(memory_manager, weights_manager),
      _mm_gemmlowp(memory_manager), _col2im_kernel(), _activationlayer_function(), _original_weights(nullptr), _im2col_output(), _weights_reshaped(), _gemm_output(), _skip_im2col(false),
      _skip_col2im(false), _is_quantized(false), _fuse_activation(true), _is_prepared(false)
{
}

void CLGEMMConvolutionLayer::configure_mm(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                                          const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
                                          int gemm_3d_depth, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
    ARM_COMPUTE_ERROR_THROW_ON(validate_mm(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), gemmlowp_output_stage, gemm_3d_depth, _skip_im2col, act_info));

    const GEMMInfo &gemm_info = GEMMInfo(false,                 // is_a_reshaped
                                         false,                 // is_b_reshaped
                                         true,                  // reshape_b_only_on_first_run
                                         gemm_3d_depth,         // depth_output_gemm3d
                                         _skip_im2col,          // reinterpret_input_as_3d
                                         false,                 // retain_internal_weights
                                         gemmlowp_output_stage, // gemmlowp_output_stage
                                         false,                 // fp_mixed_precision
                                         true,                  // broadcast_bias
                                         act_info);             // activation_info

    if(_is_quantized)
    {
        // Since we need negative offsets to compute the convolution, we need to change the QuantizationInfo()
        // Extract and negate the input and weights offsets
        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.uniform().scale, -input_quantization_info.uniform().offset));
        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));

        _mm_gemmlowp.configure(compile_context, input, weights, biases, output, gemm_info);

        // Restore the original QuantizationInfo, as the input and weights could be used in other convolution layers
        input->info()->set_quantization_info(input_quantization_info);
        weights->info()->set_quantization_info(weights_quantization_info);
    }
    else
    {
        // Configure matrix multiply function
        _mm_gemm.configure(compile_context, input, weights, biases, output, 1.0f, 1.0f, gemm_info);
    }
}

Status CLGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                           const GEMMLowpOutputStageInfo &gemmlowp_output_stage, int gemm_3d_depth, bool skip_im2col, const ActivationLayerInfo &act_info)
{
    const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());

    const GEMMInfo &gemm_info = GEMMInfo(false,                 // is_a_reshaped
                                         false,                 // is_b_reshaped
                                         true,                  // reshape_b_only_on_first_run
                                         gemm_3d_depth,         // depth_output_gemm3d
                                         skip_im2col,           // reinterpret_input_as_3d
                                         false,                 // retain_internal_weights
                                         gemmlowp_output_stage, // gemmlowp_output_stage
                                         false,                 // fp_mixed_precision
                                         true,                  // broadcast_bias
                                         act_info);             // activation_info

    if(is_quantized)
    {
        // Since we need negative offsets to compute the convolution, we need to change the QuantizationInfo()
        // Extract and negate the input and weights offsets
        const QuantizationInfo input_quantization_info   = input->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->quantization_info();

        std::unique_ptr<ITensorInfo> input_qa   = input->clone();
        std::unique_ptr<ITensorInfo> weights_qa = weights->clone();
        input_qa->set_quantization_info(QuantizationInfo(input_quantization_info.uniform().scale, -input_quantization_info.uniform().offset));
        weights_qa->set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));

        // Perform validation step on GEMMLowp
        return CLGEMMLowpMatrixMultiplyCore::validate(input_qa.get(), weights_qa.get(), biases, output, gemm_info);
    }
    else
    {
        // Perform validation step on the matrix multiply function
        return CLGEMM::validate(input, weights, biases, output, 1.0f, 1.0f, gemm_info);
    }
}

void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
                                       const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, weights_info, dilation, act_info, num_groups);
}

void CLGEMMConvolutionLayer::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                                       const PadStrideInfo &conv_info,
                                       const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

    ARM_COMPUTE_ERROR_THROW_ON(CLGEMMConvolutionLayer::validate(input->info(),
                                                                weights->info(),
                                                                biases != nullptr ? biases->info() : nullptr,
                                                                output->info(),
                                                                conv_info,
                                                                weights_info,
                                                                dilation,
                                                                act_info,
                                                                num_groups));

    const DataType   data_type   = input->info()->data_type();
    const DataLayout data_layout = input->info()->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->info()->dimension(idx_width);
    const unsigned int kernel_height = weights->info()->dimension(idx_height);
    const unsigned int num_kernels   = weights->info()->dimension(idx_kernels);

    const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
    const UniformQuantizationInfo oq_info = output->info()->quantization_info().uniform();

    _is_prepared      = weights_info.retain_internal_weights();
    _original_weights = weights;
    _is_quantized     = is_data_type_quantized_asymmetric(input->info()->data_type());
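    // A 1x1 stride-1 NHWC convolution needs no im2col: each output point already reads a
    // contiguous block of channels, so the GEMM can consume the input directly. Col2im is
    // skipped for NHWC altogether because the GEMM runs as GEMM3D and writes the output
    // tensor in its final layout.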
    _skip_im2col      = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
    _skip_col2im      = data_layout == DataLayout::NHWC;

    // Only for quantized types are there a few cases where the activation function cannot be fused in GEMM
    _fuse_activation = true;

    // Set the GPU target for im2col and col2im
    _im2col_kernel.set_target(CLScheduler::get().target());
    _col2im_kernel.set_target(CLScheduler::get().target());

    const ICLTensor *gemm_input_to_use  = input;
    ICLTensor       *gemm_output_to_use = output;

    // Get parameters from conv_info
    unsigned int stride_x = 0;
    unsigned int stride_y = 0;
    std::tie(stride_x, stride_y) = conv_info.stride();

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;
    std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(idx_width),
                                                 input->info()->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);

    unsigned int mat_weights_cols = num_kernels / num_groups;

    const ICLTensor *biases_to_use = biases;
    bool             append_bias   = false;

    ICLTensor *weights_to_use = &_weights_reshaped;
    if(num_groups != 1 && biases != nullptr)
    {
        // num_groups != 1 is only supported for the NCHW data layout
        // A utility function to reshape the biases is missing, so the biases are appended to the weights tensor instead
        biases_to_use = nullptr;
        append_bias   = true;

        if(_weights_manager && _weights_manager->are_weights_managed(weights))
        {
            _reshape_weights_managed.configure(compile_context, weights, biases, num_groups);
            weights_to_use = utils::cast::polymorphic_downcast<ICLTensor *>(_weights_manager->acquire(weights, &_reshape_weights_managed));
        }
        else
        {
            _reshape_weights.configure(compile_context, weights, biases, &_weights_reshaped, num_groups);
        }
    }
    else
    {
        if(_weights_manager && _weights_manager->are_weights_managed(weights))
        {
            _reshape_weights_managed.configure(compile_context, weights, nullptr, num_groups);
            weights_to_use = utils::cast::polymorphic_downcast<ICLTensor *>(_weights_manager->acquire(weights, &_reshape_weights_managed));
        }
        else
        {
            _reshape_weights.configure(compile_context, weights, nullptr, &_weights_reshaped, num_groups);
        }
    }

    // Create tensor to store the im2col reshaped inputs
    if(!_skip_im2col)
    {
        _memory_group.manage(&_im2col_output);

        // Configure and tune im2col. The im2col output shape is auto-initialized
        _im2col_kernel.configure(compile_context, input, &_im2col_output, Size2D(kernel_width, kernel_height), conv_info, append_bias, dilation, num_groups);

        // Set quantization info
        _im2col_output.info()->set_quantization_info(input->info()->quantization_info());
        CLScheduler::get().tune_kernel_static(_im2col_kernel);

        // Update GEMM input
        gemm_input_to_use = &_im2col_output;
    }

    // Create GEMM output tensor
    if(!_skip_col2im)
    {
        TensorShape shape_gemm;

        // If we cannot skip col2im it means we run im2col as well
        shape_gemm = _im2col_output.info()->tensor_shape();
        shape_gemm.set(0, mat_weights_cols);
        shape_gemm.set(1, conv_w * conv_h);

        // TODO(COMPMID-2078): input->clone() doesn't work with subtensors for grouped convolutions.
        TensorInfo info_gemm(shape_gemm, 1, data_type);
        info_gemm.set_quantization_info(output->info()->quantization_info()).set_data_layout(input->info()->data_layout());
        _gemm_output.allocator()->init(info_gemm);
        _memory_group.manage(&_gemm_output);

        // Update GEMM output
        gemm_output_to_use = &_gemm_output;
    }

    GEMMLowpOutputStageInfo gemmlowp_output_stage;
    gemmlowp_output_stage.type            = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    gemmlowp_output_stage.gemmlowp_offset = 0;

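    // Informal sketch of what QUANTIZE_DOWN_FIXEDPOINT does with the values set below
    // (per output channel when per-channel quantization is used):
    //   out = clamp(fixed_point_mul(acc_s32, multiplier) >> shift + output_offset, min_bound, max_bound)
    // The multiplier/shift pairs approximate (input_scale * weights_scale) / output_scale.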
    // Configure output stage for quantized case
    if(_is_quantized)
    {
        const auto         output_quant_info        = (output->info()->total_size() == 0) ? iq_info : oq_info;
        const bool         is_quantized_per_channel = is_data_type_quantized_per_channel(weights->info()->data_type());
        const unsigned int num_filters              = (is_quantized_per_channel) ? num_kernels : 1;

        gemmlowp_output_stage.is_quantized_per_channel = is_quantized_per_channel;

        gemmlowp_output_stage.gemmlowp_multipliers.resize(num_filters);
        gemmlowp_output_stage.gemmlowp_shifts.resize(num_filters);
        quantization::compute_quantized_multipliers_and_shifts(input->info(),
                                                               weights->info(),
                                                               output->info(),
                                                               idx_kernels,
                                                               gemmlowp_output_stage.gemmlowp_multipliers.data(),
                                                               gemmlowp_output_stage.gemmlowp_shifts.data());
        gemmlowp_output_stage.gemmlowp_multiplier = gemmlowp_output_stage.gemmlowp_multipliers[0];
        gemmlowp_output_stage.gemmlowp_shift      = gemmlowp_output_stage.gemmlowp_shifts[0];

        PixelValue min_val{};
        PixelValue max_val{};
        std::tie(min_val, max_val) = get_min_max(output->info()->data_type());

        auto min_activation = min_val.get<int32_t>();
        auto max_activation = max_val.get<int32_t>();

        const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
                                                                                 };

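        // These activations reduce to a clamp in the quantized domain, so they can be folded
        // into the output stage bounds; any other activation runs as a separate layer afterwards.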
        if(act_info.enabled())
        {
            if(supported_acts.count(act_info.activation()) != 0)
            {
                std::tie(min_activation, max_activation) = get_quantized_activation_min_max(act_info, data_type, output_quant_info);
            }
            else
            {
                _fuse_activation = false;
            }
        }

        // Set the GEMMLowp output stage info
        gemmlowp_output_stage.gemmlowp_offset    = output_quant_info.offset;
        gemmlowp_output_stage.gemmlowp_min_bound = min_activation;
        gemmlowp_output_stage.gemmlowp_max_bound = max_activation;
    }

    // Configure and tune GEMM
    // In case of NHWC, we need to run GEMM3D (gemm_3d_depth != 0) in order to avoid reshaping the output matrix
    const unsigned int gemm_3d_depth = (data_layout == DataLayout::NHWC) ? conv_h : 0;

    configure_mm(compile_context, gemm_input_to_use, weights_to_use, biases_to_use, gemm_output_to_use, gemmlowp_output_stage, gemm_3d_depth, act_info);

    if(!_skip_im2col)
    {
        _im2col_output.allocator()->allocate();
    }

    if(!_skip_col2im)
    {
        // Configure and tune Col2Im
        _col2im_kernel.configure(compile_context, gemm_output_to_use, output, Size2D(conv_w, conv_h), num_groups);
        CLScheduler::get().tune_kernel_static(_col2im_kernel);

        _gemm_output.allocator()->allocate();
    }

    ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(idx_width) != conv_w) || (output->info()->dimension(idx_height) != conv_h),
                             "Output shape does not match the expected one");

    if(!_fuse_activation)
    {
        _activationlayer_function.configure(compile_context, output, nullptr, act_info);
    }

    ARM_COMPUTE_UNUSED(weights_info);
}

Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                        const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!");
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
    const bool is_quantized_per_channel = is_data_type_quantized_per_channel(weights->data_type());

    if(is_quantized_per_channel)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() != DataType::QASYMM8, "Input data type not compatible with Weights");
    }
    else
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
    }
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1) && (input->data_layout() != DataLayout::NCHW), "Grouping (num_groups != 1) with NHWC data layout is not supported");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1) && (input->data_type() == DataType::QASYMM8), "Grouping (num_groups != 1) is not supported with QASYMM8");
    ARM_COMPUTE_RETURN_ERROR_ON(((input->dimension(2) / weights->dimension(2)) != num_groups) && (input->data_layout() == DataLayout::NCHW));

    const DataLayout data_layout = input->data_layout();
    const DataType   data_type   = input->data_type();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->dimension(idx_width);
    const unsigned int kernel_height = weights->dimension(idx_height);
    const unsigned int num_kernels   = weights->dimension(idx_kernels);

    TensorInfo         im2col_reshaped_info{};
    TensorInfo         info_gemm{};
    TensorInfo         weights_reshaped_info{};
    const ITensorInfo *gemm_input_to_use  = input;
    const ITensorInfo *gemm_output_to_use = output;
    const ITensorInfo *weights_to_use     = weights;
    const bool         is_quantized       = is_data_type_quantized_asymmetric(data_type);
    const bool         skip_im2col        = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
    const bool         skip_col2im        = data_layout == DataLayout::NHWC;
    bool               fuse_activation    = true;

    ARM_COMPUTE_RETURN_ERROR_ON((weights->dimension(idx_channel) * num_groups) != input->dimension(idx_channel));
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    // Validate biases
    if(biases != nullptr)
    {
        if(is_quantized)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        }
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ERROR_ON(act_info.b() > act_info.a());
    }

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;

    std::tie(conv_w, conv_h) = scaled_dimensions(input->dimension(idx_width),
                                                 input->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);

    unsigned int mat_weights_cols = num_kernels / num_groups;

    const ITensorInfo *biases_to_use = biases;
    bool               append_bias   = false;

    if(num_groups != 1 && biases != nullptr)
    {
        // num_groups != 1 is only supported for the NCHW data layout
        // A utility function to reshape the biases is missing, so the biases are appended to the weights tensor instead
        biases_to_use = nullptr;
        append_bias   = true;

        ARM_COMPUTE_RETURN_ON_ERROR(CLConvolutionLayerReshapeWeights::validate(weights, biases, nullptr, num_groups));
        weights_reshaped_info = TensorInfo(compute_weights_reshaped_shape(*weights, true, num_groups), 1, data_type);
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLConvolutionLayerReshapeWeights::validate(weights, nullptr, nullptr, num_groups));
        weights_reshaped_info = TensorInfo(compute_weights_reshaped_shape(*weights, false, num_groups), 1, data_type);
    }

    weights_to_use = &weights_reshaped_info;

    if(!skip_im2col)
    {
        const Size2D kernel_dims(kernel_width, kernel_height);

        // Output tensor auto initialization if not yet initialized
        TensorShape expected_output_shape = compute_im2col_conv_shape(input, kernel_dims, conv_info, append_bias, dilation, num_groups == 1, num_groups);

        auto_init_if_empty(im2col_reshaped_info, input->clone()->set_tensor_shape(expected_output_shape));

        ARM_COMPUTE_RETURN_ON_ERROR(CLIm2ColKernel::validate(input, &im2col_reshaped_info, kernel_dims, conv_info, append_bias, dilation, num_groups));
        gemm_input_to_use = &im2col_reshaped_info;
    }

    // Create GEMM output tensor
    if(!skip_col2im)
    {
        TensorShape shape_gemm;

        shape_gemm = gemm_input_to_use->tensor_shape();
        shape_gemm.set(0, mat_weights_cols);
        shape_gemm.set(1, conv_w * conv_h);

        info_gemm = TensorInfo(shape_gemm, 1, data_type);
        info_gemm.set_quantization_info(output->quantization_info()).set_data_layout(input->data_layout());
        gemm_output_to_use = &info_gemm;
    }

    GEMMLowpOutputStageInfo gemmlowp_output_stage;
    gemmlowp_output_stage.type                     = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    gemmlowp_output_stage.gemmlowp_offset          = 0;
    gemmlowp_output_stage.is_quantized_per_channel = is_quantized_per_channel;

    if(is_quantized)
    {
        const UniformQuantizationInfo iq_info           = input->quantization_info().uniform();
        const UniformQuantizationInfo oq_info           = output->quantization_info().uniform();
        const auto                    output_quant_info = (output->total_size() == 0) ? iq_info : oq_info;
        const unsigned int            num_filters       = (is_quantized_per_channel) ? num_kernels : 1;

        gemmlowp_output_stage.gemmlowp_multipliers.resize(num_filters);
        gemmlowp_output_stage.gemmlowp_shifts.resize(num_filters);
        quantization::compute_quantized_multipliers_and_shifts(input,
                                                               weights,
                                                               output,
                                                               idx_kernels,
                                                               gemmlowp_output_stage.gemmlowp_multipliers.data(),
                                                               gemmlowp_output_stage.gemmlowp_shifts.data());
        gemmlowp_output_stage.gemmlowp_multiplier = gemmlowp_output_stage.gemmlowp_multipliers[0];
        gemmlowp_output_stage.gemmlowp_shift      = gemmlowp_output_stage.gemmlowp_shifts[0];

        int min_activation = 0;
        int max_activation = 0;

        const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
                                                                                 };

        if(act_info.enabled())
        {
            if(supported_acts.count(act_info.activation()) != 0)
            {
                std::tie(min_activation, max_activation) = get_quantized_activation_min_max(act_info, data_type, output_quant_info);
            }
            else
            {
                fuse_activation = false;
            }
        }

        // Set the GEMMLowp output stage info
        gemmlowp_output_stage.gemmlowp_offset    = output_quant_info.offset;
        gemmlowp_output_stage.gemmlowp_min_bound = min_activation;
        gemmlowp_output_stage.gemmlowp_max_bound = max_activation;
    }

    // In case of NHWC, we need to run GEMM3D (gemm_3d_depth != 0) in order to avoid reshaping the output matrix
    const unsigned int gemm_3d_depth = (data_layout == DataLayout::NHWC) ? conv_h : 0;

    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemm_input_to_use, weights_to_use, biases_to_use, gemm_output_to_use, gemmlowp_output_stage, gemm_3d_depth, skip_im2col, act_info));

    // Validate Col2Im
    if(!skip_col2im)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLCol2ImKernel::validate(gemm_output_to_use, output, Size2D(conv_w, conv_h), num_groups));
    }

    // Validate Activation Layer
    if(!fuse_activation)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayer::validate(output, nullptr, act_info));
    }

    return Status{};
}

void CLGEMMConvolutionLayer::run()
{
    prepare();

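    // Acquire the memory-managed intermediate tensors (im2col and GEMM outputs) for the
    // duration of this run; they are released again when the scope object is destroyed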
    MemoryGroupResourceScope scope_mg(_memory_group);

    // Run im2col
    if(!_skip_im2col)
    {
        CLScheduler::get().enqueue(_im2col_kernel);
    }

    // Run CLGEMM or CLGEMMLowpMatrixMultiplyCore
    if(_is_quantized)
    {
        // Run gemmlowp
        _mm_gemmlowp.run();
    }
    else
    {
        // Run gemm
        _mm_gemm.run();
    }

    // Reshape output matrix
    if(!_skip_col2im)
    {
        CLScheduler::get().enqueue(_col2im_kernel, false);
    }

    // Run the Activation Layer if it could not be fused in GEMM
    if(!_fuse_activation)
    {
        _activationlayer_function.run();
    }
}

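// One-off preparation: the weights reshape runs a single time, the GEMM prepares its
// internal weights, and the temporary reshaped-weights tensor is freed once no longer used.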
void CLGEMMConvolutionLayer::prepare()
{
    if(!_is_prepared)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());
        if(_weights_manager && _weights_manager->are_weights_managed(_original_weights))
        {
            _weights_manager->run(_original_weights, &_reshape_weights_managed);
        }
        else
        {
            // Run weights reshaping and mark original weights tensor as unused
            _weights_reshaped.allocator()->allocate();
            _reshape_weights.run();
            _original_weights->mark_as_unused();
        }

        // Prepare GEMM
        _is_quantized ? _mm_gemmlowp.prepare() : _mm_gemm.prepare();
        if(!_weights_reshaped.is_used())
        {
            _weights_reshaped.allocator()->free();
        }

        CLScheduler::get().queue().finish();
        _is_prepared = true;
    }
}
} // namespace arm_compute