/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"

#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLCol2ImKernel.h"
#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpReductionKernel.h"
#include "src/core/CL/kernels/CLIm2ColKernel.h"
#include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "support/Cast.h"

#include <cmath>
#include <memory>
#include <tuple>

namespace arm_compute
{
using namespace arm_compute::misc::shape_calculator;
using namespace arm_compute::utils::cast;

CLConvolutionLayerReshapeWeights::CLConvolutionLayerReshapeWeights()
    : _weights_reshape_kernel(std::make_unique<CLWeightsReshapeKernel>())
{
}

CLConvolutionLayerReshapeWeights::~CLConvolutionLayerReshapeWeights() = default;

void CLConvolutionLayerReshapeWeights::configure(const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups)
{
    configure(CLKernelLibrary::get().get_compile_context(), weights, biases, output, num_groups);
}

void CLConvolutionLayerReshapeWeights::configure(const CLCompileContext &compile_context, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups)
{
    // Perform validation step
    ARM_COMPUTE_ERROR_ON_NULLPTR(weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLConvolutionLayerReshapeWeights::validate(weights->info(),
                                                                          (biases != nullptr) ? biases->info() : nullptr,
                                                                          output->info(),
                                                                          num_groups));

    const bool       append_biases = (biases != nullptr) && !is_data_type_quantized_asymmetric(weights->info()->data_type());
    const ICLTensor *biases_to_use = (append_biases) ? biases : nullptr;

    _weights_reshape_kernel->configure(compile_context, weights, biases_to_use, output, num_groups);

    output->info()->set_quantization_info(weights->info()->quantization_info());
}

Status CLConvolutionLayerReshapeWeights::validate(const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, unsigned int num_groups)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(weights);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    if(biases != nullptr)
    {
        const int idx_kernels = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::BATCHES);
        ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized(weights->data_type()));

        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    if((output != nullptr) && (output->total_size() != 0))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, output);
        ARM_COMPUTE_RETURN_ON_ERROR(CLWeightsReshapeKernel::validate(weights, biases, output, num_groups));
    }

    return Status{};
}

void CLConvolutionLayerReshapeWeights::run()
{
    CLScheduler::get().enqueue(*_weights_reshape_kernel);
}

CLGEMMConvolutionLayer::CLGEMMConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
    : _memory_group(memory_manager), _weights_manager(weights_manager), _reshape_weights(), _reshape_weights_managed(), _im2col_kernel(std::make_unique<CLIm2ColKernel>()),
      _mm_gemm(memory_manager, weights_manager), _mm_gemmlowp(memory_manager), _col2im_kernel(std::make_unique<CLCol2ImKernel>()), _activationlayer_function(), _original_weights(nullptr),
      _im2col_output(), _weights_reshaped(), _gemm_output(), _skip_im2col(false), _skip_col2im(false), _is_quantized(false), _fuse_activation(true), _is_prepared(false)
{
}

CLGEMMConvolutionLayer::~CLGEMMConvolutionLayer() = default;

void CLGEMMConvolutionLayer::configure_mm(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                                          const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
                                          int gemm_3d_depth, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
    ARM_COMPUTE_ERROR_THROW_ON(validate_mm(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), gemmlowp_output_stage, gemm_3d_depth, _skip_im2col, act_info));

    const GEMMInfo &gemm_info = GEMMInfo(false,                 // is_a_reshaped
                                         false,                 // is_b_reshaped
                                         true,                  // reshape_b_only_on_first_run
                                         gemm_3d_depth,         // depth_output_gemm3d
                                         _skip_im2col,          // reinterpret_input_as_3d
                                         false,                 // retain_internal_weights
                                         gemmlowp_output_stage, // gemmlowp_output_stage
                                         false,                 // fp_mixed_precision
                                         true,                  // broadcast_bias
                                         act_info);             // activation_info

    if(_is_quantized)
    {
        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
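        // A quantized value q represents real = scale * (q - offset), while the GEMMLowp core adds
        // the offsets it is given to each operand before multiplying. Passing -offset therefore
        // makes it accumulate sum((a - a_offset) * (b - b_offset)), i.e. the product of real values.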
        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.uniform().scale, -input_quantization_info.uniform().offset));
        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));

        _mm_gemmlowp.configure(compile_context, input, weights, biases, output, gemm_info);

        // Restore the original QuantizationInfo as the input and weights could be used in other convolution layers
        input->info()->set_quantization_info(input_quantization_info);
        weights->info()->set_quantization_info(weights_quantization_info);
    }
    else
    {
        // Configure matrix multiply function
        _mm_gemm.configure(compile_context, input, weights, biases, output, 1.0f, 1.0f, gemm_info);
    }
}

Status CLGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                           const GEMMLowpOutputStageInfo &gemmlowp_output_stage, int gemm_3d_depth, bool skip_im2col, const ActivationLayerInfo &act_info)
{
    const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());

    const GEMMInfo &gemm_info = GEMMInfo(false,                 // is_a_reshaped
                                         false,                 // is_b_reshaped
                                         true,                  // reshape_b_only_on_first_run
                                         gemm_3d_depth,         // depth_output_gemm3d
                                         skip_im2col,           // reinterpret_input_as_3d
                                         false,                 // retain_internal_weights
                                         gemmlowp_output_stage, // gemmlowp_output_stage
                                         false,                 // fp_mixed_precision
                                         true,                  // broadcast_bias
                                         act_info);             // activation_info

    if(is_quantized)
    {
        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info   = input->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->quantization_info();

        std::unique_ptr<ITensorInfo> input_qa   = input->clone();
        std::unique_ptr<ITensorInfo> weights_qa = weights->clone();
        input_qa->set_quantization_info(QuantizationInfo(input_quantization_info.uniform().scale, -input_quantization_info.uniform().offset));
        weights_qa->set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));

        // Perform validation step on GEMMLowp
        return CLGEMMLowpMatrixMultiplyCore::validate(input_qa.get(), weights_qa.get(), biases, output, gemm_info);
    }
    else
    {
        // Perform validation step on Matrix multiply function
        return CLGEMM::validate(input, weights, biases, output, 1.0f, 1.0f, gemm_info);
    }
}

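// Example usage (an illustrative sketch; src_shape, weights_shape, biases_shape and dst_shape are
// placeholders, and the padding/stride values are made up):
//
//   CLTensor src, weights, biases, dst;
//   src.allocator()->init(TensorInfo(src_shape, 1, DataType::F32));
//   weights.allocator()->init(TensorInfo(weights_shape, 1, DataType::F32));
//   biases.allocator()->init(TensorInfo(biases_shape, 1, DataType::F32));
//   dst.allocator()->init(TensorInfo(dst_shape, 1, DataType::F32));
//
//   CLGEMMConvolutionLayer conv;
//   conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1 /* stride_x */, 1 /* stride_y */, 1 /* pad_x */, 1 /* pad_y */));
//   // ... allocate the tensors and fill src/weights/biases ...
//   conv.run(); // the first call also triggers prepare(), which reshapes the weights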
void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
                                       const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, weights_info, dilation, act_info, num_groups);
}

void CLGEMMConvolutionLayer::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                                       const PadStrideInfo &conv_info,
                                       const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

    ARM_COMPUTE_ERROR_THROW_ON(CLGEMMConvolutionLayer::validate(input->info(),
                                                                weights->info(),
                                                                biases != nullptr ? biases->info() : nullptr,
                                                                output->info(),
                                                                conv_info,
                                                                weights_info,
                                                                dilation,
                                                                act_info,
                                                                num_groups));

    const DataType   data_type   = input->info()->data_type();
    const DataLayout data_layout = input->info()->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->info()->dimension(idx_width);
    const unsigned int kernel_height = weights->info()->dimension(idx_height);
    const unsigned int num_kernels   = weights->info()->dimension(idx_kernels);

    const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
    const UniformQuantizationInfo oq_info = output->info()->quantization_info().uniform();

    _is_prepared      = weights_info.retain_internal_weights();
    _original_weights = weights;
    _is_quantized     = is_data_type_quantized_asymmetric(input->info()->data_type());
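    // im2col can only be bypassed for 1x1 stride-1 NHWC convolutions, where each output pixel
    // already reads one contiguous run of input channels; col2im can always be bypassed in NHWC,
    // because the GEMM is then run as GEMM3D and writes its output directly in the final shape.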
    _skip_im2col      = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
    _skip_col2im      = data_layout == DataLayout::NHWC;

    // Only in the quantized case are there situations where the activation function cannot be fused in GEMM
    _fuse_activation = true;

    // Set the GPU target for im2col and col2im
    _im2col_kernel->set_target(CLScheduler::get().target());
    _col2im_kernel->set_target(CLScheduler::get().target());

    const ICLTensor *gemm_input_to_use  = input;
    ICLTensor       *gemm_output_to_use = output;

    // Get parameters from conv_info
    unsigned int stride_x = 0;
    unsigned int stride_y = 0;
    std::tie(stride_x, stride_y) = conv_info.stride();

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;
    std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(idx_width),
                                                 input->info()->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);

    unsigned int mat_weights_cols = num_kernels / num_groups;

    const ICLTensor *biases_to_use = biases;
    bool             append_bias   = false;

    ICLTensor *weights_to_use = &_weights_reshaped;
    if(num_groups != 1 && biases != nullptr)
    {
        // num_groups != 1 can only be used with the NCHW data layout
        // Since a utility function to reshape the biases is missing, we append the biases to the weights tensor
        biases_to_use = nullptr;
        append_bias   = true;

        if(_weights_manager && _weights_manager->are_weights_managed(weights))
        {
            _reshape_weights_managed.configure(compile_context, weights, biases, num_groups);
            weights_to_use = utils::cast::polymorphic_downcast<ICLTensor *>(_weights_manager->acquire(weights, &_reshape_weights_managed));
        }
        else
        {
            _reshape_weights.configure(compile_context, weights, biases, &_weights_reshaped, num_groups);
        }
    }
    else
    {
        if(_weights_manager && _weights_manager->are_weights_managed(weights))
        {
            _reshape_weights_managed.configure(compile_context, weights, nullptr, num_groups);
            weights_to_use = utils::cast::polymorphic_downcast<ICLTensor *>(_weights_manager->acquire(weights, &_reshape_weights_managed));
        }
        else
        {
            _reshape_weights.configure(compile_context, weights, nullptr, &_weights_reshaped, num_groups);
        }
    }

    // Create tensor to store im2col reshaped inputs
    if(!_skip_im2col)
    {
        _memory_group.manage(&_im2col_output);

        // Configure and tune im2col. im2col output shape is auto-initialized
        _im2col_kernel->configure(compile_context, input, &_im2col_output, Size2D(kernel_width, kernel_height), conv_info, append_bias, dilation, num_groups);

        // Set quantization info
        _im2col_output.info()->set_quantization_info(input->info()->quantization_info());
        CLScheduler::get().tune_kernel_static(*_im2col_kernel);

        // Update GEMM input
        gemm_input_to_use = &_im2col_output;
    }

    // Create GEMM output tensor
    if(!_skip_col2im)
    {
        TensorShape shape_gemm;

        // If col2im cannot be skipped, im2col was run as well
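        // The GEMM output has conv_w * conv_h rows (one per output pixel, matching the rows of
        // the im2col matrix) and mat_weights_cols columns (one value per kernel in the group).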
        shape_gemm = _im2col_output.info()->tensor_shape();
        shape_gemm.set(0, mat_weights_cols);
        shape_gemm.set(1, conv_w * conv_h);

        TensorInfo info_gemm(shape_gemm, 1, data_type);
        info_gemm.set_quantization_info(output->info()->quantization_info()).set_data_layout(input->info()->data_layout());
        _gemm_output.allocator()->init(info_gemm);
        _memory_group.manage(&_gemm_output);

        // Update GEMM output
        gemm_output_to_use = &_gemm_output;
    }

    GEMMLowpOutputStageInfo gemmlowp_output_stage;
    gemmlowp_output_stage.type            = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    gemmlowp_output_stage.gemmlowp_offset = 0;

    // Configure output stage for quantized case
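    // QUANTIZE_DOWN_FIXEDPOINT rescales the int32 accumulators by the real multiplier
    // input_scale * weights_scale / output_scale, expressed as a fixed-point multiplier plus a
    // shift (one pair per output channel when the weights are quantized per channel).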
    if(_is_quantized)
    {
        const auto         output_quant_info        = (output->info()->total_size() == 0) ? iq_info : oq_info;
        const bool         is_quantized_per_channel = is_data_type_quantized_per_channel(weights->info()->data_type());
        const unsigned int num_filters              = (is_quantized_per_channel) ? num_kernels : 1;

        gemmlowp_output_stage.is_quantized_per_channel = is_quantized_per_channel;

        gemmlowp_output_stage.gemmlowp_multipliers.resize(num_filters);
        gemmlowp_output_stage.gemmlowp_shifts.resize(num_filters);
        quantization::compute_quantized_multipliers_and_shifts(input->info(),
                                                               weights->info(),
                                                               output->info(),
                                                               idx_kernels,
                                                               gemmlowp_output_stage.gemmlowp_multipliers.data(),
                                                               gemmlowp_output_stage.gemmlowp_shifts.data());
        gemmlowp_output_stage.gemmlowp_multiplier = gemmlowp_output_stage.gemmlowp_multipliers[0];
        gemmlowp_output_stage.gemmlowp_shift      = gemmlowp_output_stage.gemmlowp_shifts[0];

        PixelValue min_val{};
        PixelValue max_val{};
        std::tie(min_val, max_val) = get_min_max(output->info()->data_type());

        auto min_activation = min_val.get<int32_t>();
        auto max_activation = max_val.get<int32_t>();
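        // Default clamping bounds are the full representable range of the output type; a fused
        // ReLU-family activation narrows them below.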

        const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
                                                                                 };

        if(act_info.enabled())
        {
            if(supported_acts.count(act_info.activation()) != 0)
            {
                std::tie(min_activation, max_activation) = get_quantized_activation_min_max(act_info, data_type, output_quant_info);
            }
            else
            {
                _fuse_activation = false;
            }
        }

        // Set the GEMMLowp output stage info
        gemmlowp_output_stage.gemmlowp_offset    = output_quant_info.offset;
        gemmlowp_output_stage.gemmlowp_min_bound = min_activation;
        gemmlowp_output_stage.gemmlowp_max_bound = max_activation;
    }

    // Configure and tune GEMM
    // In case of NHWC, we need to run GEMM3D (gemm_3d_depth != 0) in order to avoid reshaping the output matrix
    const unsigned int gemm_3d_depth = (data_layout == DataLayout::NHWC) ? conv_h : 0;

    configure_mm(compile_context, gemm_input_to_use, weights_to_use, biases_to_use, gemm_output_to_use, gemmlowp_output_stage, gemm_3d_depth, act_info);

    if(!_skip_im2col)
    {
        _im2col_output.allocator()->allocate();
    }

    if(!_skip_col2im)
    {
        // Configure and tune Col2Im
        _col2im_kernel->configure(compile_context, gemm_output_to_use, output, Size2D(conv_w, conv_h), num_groups);
        CLScheduler::get().tune_kernel_static(*_col2im_kernel);

        _gemm_output.allocator()->allocate();
    }

    ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(idx_width) != conv_w) || (output->info()->dimension(idx_height) != conv_h),
                             "Output shape does not match the expected one");

    if(!_fuse_activation)
    {
        _activationlayer_function.configure(compile_context, output, nullptr, act_info);
    }

    ARM_COMPUTE_UNUSED(weights_info);
}

Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                        const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!");
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
    const bool is_quantized_per_channel = is_data_type_quantized_per_channel(weights->data_type());

    if(!is_quantized_per_channel)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
    }
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1) && (input->data_layout() != DataLayout::NCHW), "Grouping (num_groups != 1) with NHWC data layout is not supported");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1) && (input->data_type() == DataType::QASYMM8), "Grouping (num_groups != 1) is not supported with QASYMM8");
    ARM_COMPUTE_RETURN_ERROR_ON(((input->dimension(2) / weights->dimension(2)) != num_groups) && (input->data_layout() == DataLayout::NCHW));

    const DataLayout data_layout = input->data_layout();
    const DataType   data_type   = input->data_type();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->dimension(idx_width);
    const unsigned int kernel_height = weights->dimension(idx_height);
    const unsigned int num_kernels   = weights->dimension(idx_kernels);

    TensorInfo         im2col_reshaped_info{};
    TensorInfo         info_gemm{};
    TensorInfo         weights_reshaped_info{};
    const ITensorInfo *gemm_input_to_use  = input;
    const ITensorInfo *gemm_output_to_use = output;
    const ITensorInfo *weights_to_use     = weights;
    const bool         is_quantized       = is_data_type_quantized_asymmetric(data_type);
    const bool         skip_im2col        = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
    const bool         skip_col2im        = data_layout == DataLayout::NHWC;
    bool               fuse_activation    = true;

    ARM_COMPUTE_RETURN_ERROR_ON((weights->dimension(idx_channel) * num_groups) != input->dimension(idx_channel));
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    // Validate biases
    if(biases != nullptr)
    {
        if(is_quantized)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        }
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ERROR_ON(act_info.b() > act_info.a());
    }

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;

    std::tie(conv_w, conv_h) = scaled_dimensions(input->dimension(idx_width),
                                                 input->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);

    unsigned int mat_weights_cols = num_kernels / num_groups;

    const ITensorInfo *biases_to_use = biases;
    bool               append_bias   = false;

    if(num_groups != 1 && biases != nullptr)
    {
        // num_groups != 1 can only be used with the NCHW data layout
        // Since a utility function to reshape the biases is missing, we append the biases to the weights tensor
        biases_to_use = nullptr;
        append_bias   = true;

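        // With append_bias, the reshape packs one extra element per kernel (its bias value) into
        // the reshaped weights, so no separate bias tensor is handed to the GEMM in this path.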
        ARM_COMPUTE_RETURN_ON_ERROR(CLConvolutionLayerReshapeWeights::validate(weights, biases, nullptr, num_groups));
        weights_reshaped_info = TensorInfo(compute_weights_reshaped_shape(*weights, true, num_groups), 1, data_type);
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLConvolutionLayerReshapeWeights::validate(weights, nullptr, nullptr, num_groups));
        weights_reshaped_info = TensorInfo(compute_weights_reshaped_shape(*weights, false, num_groups), 1, data_type);
    }

    weights_to_use = &weights_reshaped_info;

    if(!skip_im2col)
    {
        const Size2D kernel_dims(kernel_width, kernel_height);

        // Output tensor auto initialization if not yet initialized
        TensorShape expected_output_shape = compute_im2col_conv_shape(input, kernel_dims, conv_info, append_bias, dilation, num_groups == 1, num_groups);

        auto_init_if_empty(im2col_reshaped_info, input->clone()->set_tensor_shape(expected_output_shape));

        ARM_COMPUTE_RETURN_ON_ERROR(CLIm2ColKernel::validate(input, &im2col_reshaped_info, kernel_dims, conv_info, append_bias, dilation, num_groups));
        gemm_input_to_use = &im2col_reshaped_info;
    }

    // Create GEMM output tensor
    if(!skip_col2im)
    {
        TensorShape shape_gemm;

        shape_gemm = gemm_input_to_use->tensor_shape();
        shape_gemm.set(0, mat_weights_cols);
        shape_gemm.set(1, conv_w * conv_h);

        info_gemm = TensorInfo(shape_gemm, 1, data_type);
        info_gemm.set_quantization_info(output->quantization_info()).set_data_layout(input->data_layout());
        gemm_output_to_use = &info_gemm;
    }

    GEMMLowpOutputStageInfo gemmlowp_output_stage;
    gemmlowp_output_stage.type                     = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    gemmlowp_output_stage.gemmlowp_offset          = 0;
    gemmlowp_output_stage.is_quantized_per_channel = is_quantized_per_channel;

    if(is_quantized)
    {
        const UniformQuantizationInfo iq_info           = input->quantization_info().uniform();
        const UniformQuantizationInfo oq_info           = output->quantization_info().uniform();
        const auto                    output_quant_info = (output->total_size() == 0) ? iq_info : oq_info;
        const unsigned int            num_filters       = (is_quantized_per_channel) ? num_kernels : 1;

        gemmlowp_output_stage.gemmlowp_multipliers.resize(num_filters);
        gemmlowp_output_stage.gemmlowp_shifts.resize(num_filters);
        quantization::compute_quantized_multipliers_and_shifts(input,
                                                               weights,
                                                               output,
                                                               idx_kernels,
                                                               gemmlowp_output_stage.gemmlowp_multipliers.data(),
                                                               gemmlowp_output_stage.gemmlowp_shifts.data());
        gemmlowp_output_stage.gemmlowp_multiplier = gemmlowp_output_stage.gemmlowp_multipliers[0];
        gemmlowp_output_stage.gemmlowp_shift      = gemmlowp_output_stage.gemmlowp_shifts[0];

        int min_activation = 0;
        int max_activation = 0;

        const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
                                                                                 };

        if(act_info.enabled())
        {
            if(supported_acts.count(act_info.activation()) != 0)
            {
                std::tie(min_activation, max_activation) = get_quantized_activation_min_max(act_info, data_type, output_quant_info);
            }
            else
            {
                fuse_activation = false;
            }
        }

        // Set the GEMMLowp output stage info
        gemmlowp_output_stage.gemmlowp_offset    = output_quant_info.offset;
        gemmlowp_output_stage.gemmlowp_min_bound = min_activation;
        gemmlowp_output_stage.gemmlowp_max_bound = max_activation;
    }

    // In case of NHWC, we need to run GEMM3D (gemm_3d_depth != 0) in order to avoid reshaping the output matrix
    const unsigned int gemm_3d_depth = (data_layout == DataLayout::NHWC) ? conv_h : 0;

    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemm_input_to_use, weights_to_use, biases_to_use, gemm_output_to_use, gemmlowp_output_stage, gemm_3d_depth, skip_im2col, act_info));

    // Validate Col2Im
    if(!skip_col2im)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLCol2ImKernel::validate(gemm_output_to_use, output, Size2D(conv_w, conv_h), num_groups));
    }

    // Validate Activation Layer
    if(!fuse_activation)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayer::validate(output, nullptr, act_info));
    }

    return Status{};
}

void CLGEMMConvolutionLayer::run()
{
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);

    // Run im2col
    if(!_skip_im2col)
    {
        CLScheduler::get().enqueue(*_im2col_kernel);
    }

    // Run the CLGEMM or CLGEMMLowpMatrixMultiplyCore function
    if(_is_quantized)
    {
        // Run gemmlowp
        _mm_gemmlowp.run();
    }
    else
    {
        // Run gemm
        _mm_gemm.run();
    }

    // Reshape output matrix
    if(!_skip_col2im)
    {
        CLScheduler::get().enqueue(*_col2im_kernel, false);
    }

    // Run the activation layer if it could not be fused in GEMM
    if(!_fuse_activation)
    {
        _activationlayer_function.run();
    }
}

void CLGEMMConvolutionLayer::prepare()
{
    if(!_is_prepared)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());
        if(_weights_manager && _weights_manager->are_weights_managed(_original_weights))
        {
            _weights_manager->run(_original_weights, &_reshape_weights_managed);
        }
        else
        {
            // Run weights reshaping and mark original weights tensor as unused
            _weights_reshaped.allocator()->allocate();
            _reshape_weights.run();
            _original_weights->mark_as_unused();
        }

        // Prepare GEMM
        _is_quantized ? _mm_gemmlowp.prepare() : _mm_gemm.prepare();
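        // Once the GEMM function holds its own transformed copy of the weights, the intermediate
        // reshaped tensor is no longer referenced and its backing memory can be released.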
        if(!_weights_reshaped.is_used())
        {
            _weights_reshaped.allocator()->free();
        }

        CLScheduler::get().queue().finish();
        _is_prepared = true;
    }
}
} // namespace arm_compute