/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"

#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLCol2ImKernel.h"
#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpReductionKernel.h"
#include "src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
#include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h"
#include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h"
#include "src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h"
#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
#include "src/core/CL/kernels/CLIm2ColKernel.h"
#include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "support/Cast.h"
#include "support/MemorySupport.h"

#include <cmath>
#include <memory>
#include <set>
#include <tuple>

namespace arm_compute
{
using namespace arm_compute::misc::shape_calculator;
using namespace arm_compute::utils::cast;

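// CLConvolutionLayerReshapeWeights flattens a 4D weights tensor
// [kernel_x, kernel_y, IFM, OFM] into the 2D matrix consumed by the GEMM,
// optionally folding the biases into the reshaped matrix so that no separate
// bias addition is needed (non-quantized types only).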
CLConvolutionLayerReshapeWeights::CLConvolutionLayerReshapeWeights()
    : _weights_reshape_kernel(support::cpp14::make_unique<CLWeightsReshapeKernel>())
{
}

CLConvolutionLayerReshapeWeights::~CLConvolutionLayerReshapeWeights() = default;

void CLConvolutionLayerReshapeWeights::configure(const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups)
{
    configure(CLKernelLibrary::get().get_compile_context(), weights, biases, output, num_groups);
}

void CLConvolutionLayerReshapeWeights::configure(const CLCompileContext &compile_context, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups)
{
    // Perform validation step
    ARM_COMPUTE_ERROR_ON_NULLPTR(weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLConvolutionLayerReshapeWeights::validate(weights->info(),
                                                                          (biases != nullptr) ? biases->info() : nullptr,
                                                                          output->info(),
                                                                          num_groups));

    const bool       append_biases = (biases != nullptr) && !is_data_type_quantized_asymmetric(weights->info()->data_type());
    const ICLTensor *biases_to_use = (append_biases) ? biases : nullptr;

    _weights_reshape_kernel->configure(compile_context, weights, biases_to_use, output, num_groups);

    output->info()->set_quantization_info(weights->info()->quantization_info());
}

Status CLConvolutionLayerReshapeWeights::validate(const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, unsigned int num_groups)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(weights);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    if(biases != nullptr)
    {
        const int idx_kernels = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::BATCHES);
        ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized(weights->data_type()));

        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    if((output != nullptr) && (output->total_size() != 0))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, output);
        ARM_COMPUTE_RETURN_ON_ERROR(CLWeightsReshapeKernel::validate(weights, biases, output, num_groups));
    }

    return Status{};
}

void CLConvolutionLayerReshapeWeights::run()
{
    CLScheduler::get().enqueue(*_weights_reshape_kernel);
}

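// CLGEMMConvolutionLayer lowers a convolution to a matrix multiplication:
// the input is (optionally) rearranged with im2col, multiplied against the
// reshaped weights with CLGEMM (float) or CLGEMMLowpMatrixMultiplyCore
// (quantized), and the result is (optionally) rearranged back with col2im.
// The memory manager owns the intermediate tensors, and the weights manager,
// when provided, allows the reshaped weights to be shared across functions.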
CLGEMMConvolutionLayer::CLGEMMConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
    : _memory_group(memory_manager), _weights_manager(weights_manager), _reshape_weights(), _reshape_weights_managed(), _im2col_kernel(support::cpp14::make_unique<CLIm2ColKernel>()),
      _mm_gemm(memory_manager, weights_manager), _mm_gemmlowp(memory_manager), _col2im_kernel(support::cpp14::make_unique<CLCol2ImKernel>()), _activationlayer_function(), _original_weights(nullptr),
      _im2col_output(), _weights_reshaped(), _gemm_output(), _skip_im2col(false), _skip_col2im(false), _is_quantized(false), _fuse_activation(true), _is_prepared(false)
{
}

CLGEMMConvolutionLayer::~CLGEMMConvolutionLayer() = default;

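// configure_mm dispatches to CLGEMM for float types and to
// CLGEMMLowpMatrixMultiplyCore for quantized types. On the quantized path the
// input/weights offsets are negated before configuring GEMMLowp (the
// convolution needs the negative offsets) and restored afterwards, so the
// same tensors can still feed other layers.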
void CLGEMMConvolutionLayer::configure_mm(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                                          const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
                                          int gemm_3d_depth, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
    ARM_COMPUTE_ERROR_THROW_ON(validate_mm(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), gemmlowp_output_stage, gemm_3d_depth, _skip_im2col, act_info));

    const GEMMInfo &gemm_info = GEMMInfo(false,                 // is_a_reshaped
                                         false,                 // is_b_reshaped
                                         true,                  // reshape_b_only_on_first_run
                                         gemm_3d_depth,         // depth_output_gemm3d
                                         _skip_im2col,          // reinterpret_input_as_3d
                                         false,                 // retain_internal_weights
                                         gemmlowp_output_stage, // gemmlowp_output_stage
                                         false,                 // fp_mixed_precision
                                         true,                  // broadcast_bias
                                         act_info);             // activation_info

    if(_is_quantized)
    {
        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.uniform().scale, -input_quantization_info.uniform().offset));
        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));

        _mm_gemmlowp.configure(compile_context, input, weights, biases, output, gemm_info);

        // Restore the original QuantizationInfo as input and weights could be used in other convolution layers
        input->info()->set_quantization_info(input_quantization_info);
        weights->info()->set_quantization_info(weights_quantization_info);
    }
    else
    {
        // Configure matrix multiply function
        _mm_gemm.configure(compile_context, input, weights, biases, output, 1.0f, 1.0f, gemm_info);
    }
}

Status CLGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                           const GEMMLowpOutputStageInfo &gemmlowp_output_stage, int gemm_3d_depth, bool skip_im2col, const ActivationLayerInfo &act_info)
{
    const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());

    const GEMMInfo &gemm_info = GEMMInfo(false,                 // is_a_reshaped
                                         false,                 // is_b_reshaped
                                         true,                  // reshape_b_only_on_first_run
                                         gemm_3d_depth,         // depth_output_gemm3d
                                         skip_im2col,           // reinterpret_input_as_3d
                                         false,                 // retain_internal_weights
                                         gemmlowp_output_stage, // gemmlowp_output_stage
                                         false,                 // fp_mixed_precision
                                         true,                  // broadcast_bias
                                         act_info);             // activation_info

    if(is_quantized)
    {
        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info   = input->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->quantization_info();

        std::unique_ptr<ITensorInfo> input_qa   = input->clone();
        std::unique_ptr<ITensorInfo> weights_qa = weights->clone();
        input_qa->set_quantization_info(QuantizationInfo(input_quantization_info.uniform().scale, -input_quantization_info.uniform().offset));
        weights_qa->set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));

        // Perform validation step on GEMMLowp
        return CLGEMMLowpMatrixMultiplyCore::validate(input_qa.get(), weights_qa.get(), biases, output, gemm_info);
    }
    else
    {
        // Perform validation step on Matrix multiply function
        return CLGEMM::validate(input, weights, biases, output, 1.0f, 1.0f, gemm_info);
    }
}

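// configure() sets up the whole pipeline: reshape the weights (optionally
// through the weights manager), run im2col unless it can be skipped, perform
// the (possibly quantized) matrix multiplication, and reshape the result back
// with col2im unless GEMM3D already produces the final layout.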
void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
                                       const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, weights_info, dilation, act_info, num_groups);
}

void CLGEMMConvolutionLayer::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                                       const PadStrideInfo &conv_info,
                                       const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

    ARM_COMPUTE_ERROR_THROW_ON(CLGEMMConvolutionLayer::validate(input->info(),
                                                                weights->info(),
                                                                biases != nullptr ? biases->info() : nullptr,
                                                                output->info(),
                                                                conv_info,
                                                                weights_info,
                                                                dilation,
                                                                act_info,
                                                                num_groups));

    const DataType   data_type   = input->info()->data_type();
    const DataLayout data_layout = input->info()->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->info()->dimension(idx_width);
    const unsigned int kernel_height = weights->info()->dimension(idx_height);
    const unsigned int num_kernels   = weights->info()->dimension(idx_kernels);

    const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
    const UniformQuantizationInfo oq_info = output->info()->quantization_info().uniform();

    _is_prepared      = weights_info.retain_internal_weights();
    _original_weights = weights;
    _is_quantized     = is_data_type_quantized_asymmetric(input->info()->data_type());
    _skip_im2col      = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
    _skip_col2im      = data_layout == DataLayout::NHWC;
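    // A 1x1 kernel with unit stride on NHWC data reads exactly one input pixel
    // per output element, so the input is already in im2col form. In NHWC the
    // col2im step is avoided altogether by letting GEMM write a 3D output
    // (see gemm_3d_depth below).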

    // Only in the quantized case are there scenarios where the activation function cannot be fused in GEMM
    _fuse_activation = true;

    // Set the GPU target for im2col and col2im
    _im2col_kernel->set_target(CLScheduler::get().target());
    _col2im_kernel->set_target(CLScheduler::get().target());

    const ICLTensor *gemm_input_to_use  = input;
    ICLTensor       *gemm_output_to_use = output;

    // Get parameters from conv_info
    unsigned int stride_x = 0;
    unsigned int stride_y = 0;
    std::tie(stride_x, stride_y) = conv_info.stride();

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;
    std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(idx_width),
                                                 input->info()->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);

    unsigned int mat_weights_cols = num_kernels / num_groups;

    const ICLTensor *biases_to_use = biases;
    bool             append_bias   = false;

    ICLTensor *weights_to_use = &_weights_reshaped;
    if(num_groups != 1 && biases != nullptr)
    {
        // num_groups != 1 can only be for NCHW
        // Since a utility function to reshape the biases is missing, we append the biases into the weights tensor
        biases_to_use = nullptr;
        append_bias   = true;

        if(_weights_manager && _weights_manager->are_weights_managed(weights))
        {
            _reshape_weights_managed.configure(compile_context, weights, biases, num_groups);
            weights_to_use = utils::cast::polymorphic_downcast<ICLTensor *>(_weights_manager->acquire(weights, &_reshape_weights_managed));
        }
        else
        {
            _reshape_weights.configure(compile_context, weights, biases, &_weights_reshaped, num_groups);
        }
    }
    else
    {
        if(_weights_manager && _weights_manager->are_weights_managed(weights))
        {
            _reshape_weights_managed.configure(compile_context, weights, nullptr, num_groups);
            weights_to_use = utils::cast::polymorphic_downcast<ICLTensor *>(_weights_manager->acquire(weights, &_reshape_weights_managed));
        }
        else
        {
            _reshape_weights.configure(compile_context, weights, nullptr, &_weights_reshaped, num_groups);
        }
    }

    // Create tensor to store im2col reshaped inputs
    if(!_skip_im2col)
    {
        _memory_group.manage(&_im2col_output);

        // Configure and tune im2col. im2col output shape is auto-initialized
        _im2col_kernel->configure(compile_context, input, &_im2col_output, Size2D(kernel_width, kernel_height), conv_info, append_bias, dilation, num_groups);

        // Set quantization info
        _im2col_output.info()->set_quantization_info(input->info()->quantization_info());
        CLScheduler::get().tune_kernel_static(*_im2col_kernel);

        // Update GEMM input
        gemm_input_to_use = &_im2col_output;
    }

    // Create GEMM output tensor
    if(!_skip_col2im)
    {
        TensorShape shape_gemm;

        // If we cannot skip col2im it means we run im2col as well
        shape_gemm = _im2col_output.info()->tensor_shape();
        shape_gemm.set(0, mat_weights_cols);
        shape_gemm.set(1, conv_w * conv_h);

        TensorInfo info_gemm(shape_gemm, 1, data_type);
        info_gemm.set_quantization_info(output->info()->quantization_info()).set_data_layout(input->info()->data_layout());
        _gemm_output.allocator()->init(info_gemm);
        _memory_group.manage(&_gemm_output);

        // Update GEMM output
        gemm_output_to_use = &_gemm_output;
    }

    GEMMLowpOutputStageInfo gemmlowp_output_stage;
    gemmlowp_output_stage.type            = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    gemmlowp_output_stage.gemmlowp_offset = 0;

    // Configure output stage for quantized case
    if(_is_quantized)
    {
        const auto         output_quant_info        = (output->info()->total_size() == 0) ? iq_info : oq_info;
        const bool         is_quantized_per_channel = is_data_type_quantized_per_channel(weights->info()->data_type());
        const unsigned int num_filters              = (is_quantized_per_channel) ? num_kernels : 1;

        gemmlowp_output_stage.is_quantized_per_channel = is_quantized_per_channel;

        gemmlowp_output_stage.gemmlowp_multipliers.resize(num_filters);
        gemmlowp_output_stage.gemmlowp_shifts.resize(num_filters);
        quantization::compute_quantized_multipliers_and_shifts(input->info(),
                                                               weights->info(),
                                                               output->info(),
                                                               idx_kernels,
                                                               gemmlowp_output_stage.gemmlowp_multipliers.data(),
                                                               gemmlowp_output_stage.gemmlowp_shifts.data());
        gemmlowp_output_stage.gemmlowp_multiplier = gemmlowp_output_stage.gemmlowp_multipliers[0];
        gemmlowp_output_stage.gemmlowp_shift      = gemmlowp_output_stage.gemmlowp_shifts[0];
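        // Each filter's real requantization scale (input_scale * weights_scale / output_scale)
        // is encoded as a fixed-point integer multiplier plus a right shift; per-channel
        // quantized weights get one multiplier/shift pair per filter, otherwise a single
        // pair is broadcast to the whole output.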

        PixelValue min_val{};
        PixelValue max_val{};
        std::tie(min_val, max_val) = get_min_max(output->info()->data_type());

        auto min_activation = min_val.get<int32_t>();
        auto max_activation = max_val.get<int32_t>();

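        // Only clamp-like activations (the RELU family) can be folded into the
        // requantization bounds; anything else falls back to a separate
        // CLActivationLayer configured at the end of configure().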
        const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
                                                                                 };

        if(act_info.enabled())
        {
            if(supported_acts.count(act_info.activation()) != 0)
            {
                std::tie(min_activation, max_activation) = get_quantized_activation_min_max(act_info, data_type, output_quant_info);
            }
            else
            {
                _fuse_activation = false;
            }
        }

        // Set the GEMMLowp output stage info
        gemmlowp_output_stage.gemmlowp_offset    = output_quant_info.offset;
        gemmlowp_output_stage.gemmlowp_min_bound = min_activation;
        gemmlowp_output_stage.gemmlowp_max_bound = max_activation;
    }

    // Configure and tune GEMM
    // In case of NHWC, we need to run GEMM3D (gemm_3d_depth != 0) in order to avoid reshaping the output matrix
    const unsigned int gemm_3d_depth = (data_layout == DataLayout::NHWC) ? conv_h : 0;

    configure_mm(compile_context, gemm_input_to_use, weights_to_use, biases_to_use, gemm_output_to_use, gemmlowp_output_stage, gemm_3d_depth, act_info);

    if(!_skip_im2col)
    {
        _im2col_output.allocator()->allocate();
    }

    if(!_skip_col2im)
    {
        // Configure and tune Col2Im
        _col2im_kernel->configure(compile_context, gemm_output_to_use, output, Size2D(conv_w, conv_h), num_groups);
        CLScheduler::get().tune_kernel_static(*_col2im_kernel);

        _gemm_output.allocator()->allocate();
    }

    ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(idx_width) != conv_w) || (output->info()->dimension(idx_height) != conv_h),
                             "Output shape does not match the expected one");

    if(!_fuse_activation)
    {
        _activationlayer_function.configure(compile_context, output, nullptr, act_info);
    }

    ARM_COMPUTE_UNUSED(weights_info);
}

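// validate() mirrors configure() step by step on ITensorInfo objects only, so
// a caller can check a configuration up front without allocating any tensors
// or touching the OpenCL runtime.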
Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                        const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!");
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
    const bool is_quantized_per_channel = is_data_type_quantized_per_channel(weights->data_type());

    if(!is_quantized_per_channel)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
    }
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1) && (input->data_layout() != DataLayout::NCHW), "Grouping (num_groups != 1) with NHWC data layout is not supported");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1) && (input->data_type() == DataType::QASYMM8), "Grouping (num_groups != 1) is not supported with QASYMM8");
    ARM_COMPUTE_RETURN_ERROR_ON(((input->dimension(2) / weights->dimension(2)) != num_groups) && (input->data_layout() == DataLayout::NCHW));

    const DataLayout data_layout = input->data_layout();
    const DataType   data_type   = input->data_type();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->dimension(idx_width);
    const unsigned int kernel_height = weights->dimension(idx_height);
    const unsigned int num_kernels   = weights->dimension(idx_kernels);

    TensorInfo         im2col_reshaped_info{};
    TensorInfo         info_gemm{};
    TensorInfo         weights_reshaped_info{};
    const ITensorInfo *gemm_input_to_use  = input;
    const ITensorInfo *gemm_output_to_use = output;
    const ITensorInfo *weights_to_use     = weights;
    const bool         is_quantized       = is_data_type_quantized_asymmetric(data_type);
    const bool         skip_im2col        = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
    const bool         skip_col2im        = data_layout == DataLayout::NHWC;
    bool               fuse_activation    = true;

    ARM_COMPUTE_RETURN_ERROR_ON((weights->dimension(idx_channel) * num_groups) != input->dimension(idx_channel));
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    // Validate biases
    if(biases != nullptr)
    {
        if(is_quantized)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        }
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ERROR_ON(act_info.b() > act_info.a());
    }

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;

    std::tie(conv_w, conv_h) = scaled_dimensions(input->dimension(idx_width),
                                                 input->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);

    unsigned int mat_weights_cols = num_kernels / num_groups;

    const ITensorInfo *biases_to_use = biases;
    bool               append_bias   = false;

    if(num_groups != 1 && biases != nullptr)
    {
        // num_groups != 1 can only be for NCHW
        // Since a utility function to reshape the biases is missing, we append the biases into the weights tensor
        biases_to_use = nullptr;
        append_bias   = true;

        ARM_COMPUTE_RETURN_ON_ERROR(CLConvolutionLayerReshapeWeights::validate(weights, biases, nullptr, num_groups));
        weights_reshaped_info = TensorInfo(compute_weights_reshaped_shape(*weights, true, num_groups), 1, data_type);
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLConvolutionLayerReshapeWeights::validate(weights, nullptr, nullptr, num_groups));
        weights_reshaped_info = TensorInfo(compute_weights_reshaped_shape(*weights, false, num_groups), 1, data_type);
    }

    weights_to_use = &weights_reshaped_info;

    if(!skip_im2col)
    {
        const Size2D kernel_dims(kernel_width, kernel_height);

        // Output tensor auto initialization if not yet initialized
        TensorShape expected_output_shape = compute_im2col_conv_shape(input, kernel_dims, conv_info, append_bias, dilation, num_groups == 1, num_groups);

        auto_init_if_empty(im2col_reshaped_info, input->clone()->set_tensor_shape(expected_output_shape));

        ARM_COMPUTE_RETURN_ON_ERROR(CLIm2ColKernel::validate(input, &im2col_reshaped_info, kernel_dims, conv_info, append_bias, dilation, num_groups));
        gemm_input_to_use = &im2col_reshaped_info;
    }

    // Create GEMM output tensor
    if(!skip_col2im)
    {
        TensorShape shape_gemm;

        shape_gemm = gemm_input_to_use->tensor_shape();
        shape_gemm.set(0, mat_weights_cols);
        shape_gemm.set(1, conv_w * conv_h);

        info_gemm = TensorInfo(shape_gemm, 1, data_type);
        info_gemm.set_quantization_info(output->quantization_info()).set_data_layout(input->data_layout());
        gemm_output_to_use = &info_gemm;
    }

    GEMMLowpOutputStageInfo gemmlowp_output_stage;
    gemmlowp_output_stage.type                     = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    gemmlowp_output_stage.gemmlowp_offset          = 0;
    gemmlowp_output_stage.is_quantized_per_channel = is_quantized_per_channel;

    if(is_quantized)
    {
        const UniformQuantizationInfo iq_info           = input->quantization_info().uniform();
        const UniformQuantizationInfo oq_info           = output->quantization_info().uniform();
        const auto                    output_quant_info = (output->total_size() == 0) ? iq_info : oq_info;
        const unsigned int            num_filters       = (is_quantized_per_channel) ? num_kernels : 1;

        gemmlowp_output_stage.gemmlowp_multipliers.resize(num_filters);
        gemmlowp_output_stage.gemmlowp_shifts.resize(num_filters);
        quantization::compute_quantized_multipliers_and_shifts(input,
                                                               weights,
                                                               output,
                                                               idx_kernels,
                                                               gemmlowp_output_stage.gemmlowp_multipliers.data(),
                                                               gemmlowp_output_stage.gemmlowp_shifts.data());
        gemmlowp_output_stage.gemmlowp_multiplier = gemmlowp_output_stage.gemmlowp_multipliers[0];
        gemmlowp_output_stage.gemmlowp_shift      = gemmlowp_output_stage.gemmlowp_shifts[0];

        int min_activation = 0;
        int max_activation = 0;

        const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
                                                                                 };

        if(act_info.enabled())
        {
            if(supported_acts.count(act_info.activation()) != 0)
            {
                std::tie(min_activation, max_activation) = get_quantized_activation_min_max(act_info, data_type, output_quant_info);
            }
            else
            {
                fuse_activation = false;
            }
        }

        // Set the GEMMLowp output stage info
        gemmlowp_output_stage.gemmlowp_offset    = output_quant_info.offset;
        gemmlowp_output_stage.gemmlowp_min_bound = min_activation;
        gemmlowp_output_stage.gemmlowp_max_bound = max_activation;
    }

    // In case of NHWC, we need to run GEMM3D (gemm_3d_depth != 0) in order to avoid reshaping the output matrix
    const unsigned int gemm_3d_depth = (data_layout == DataLayout::NHWC) ? conv_h : 0;

    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemm_input_to_use, weights_to_use, biases_to_use, gemm_output_to_use, gemmlowp_output_stage, gemm_3d_depth, skip_im2col, act_info));

    // Validate Col2Im
    if(!skip_col2im)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLCol2ImKernel::validate(gemm_output_to_use, output, Size2D(conv_w, conv_h), num_groups));
    }

    // Validate Activation Layer
    if(!fuse_activation)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayer::validate(output, nullptr, act_info));
    }

    return Status{};
}

void CLGEMMConvolutionLayer::run()
{
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);

    // Run im2col
    if(!_skip_im2col)
    {
        CLScheduler::get().enqueue(*_im2col_kernel);
    }

    // Runs CLGEMM or CLGEMMLowpMatrixMultiplyCore functions
    if(_is_quantized)
    {
        // Run gemmlowp
        _mm_gemmlowp.run();
    }
    else
    {
        // Run gemm
        _mm_gemm.run();
    }

    // Reshape output matrix
    if(!_skip_col2im)
    {
        CLScheduler::get().enqueue(*_col2im_kernel, false);
    }

    // Run the Activation Layer if it could not be fused in GEMM
    if(!_fuse_activation)
    {
        _activationlayer_function.run();
    }
}

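// prepare() performs the one-off work on first run: reshape the weights (or
// hand them to the weights manager), prepare the GEMM, and free the reshaped
// weights again once the GEMM has consumed them into its own internal layout.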
void CLGEMMConvolutionLayer::prepare()
{
    if(!_is_prepared)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());
        if(_weights_manager && _weights_manager->are_weights_managed(_original_weights))
        {
            _weights_manager->run(_original_weights, &_reshape_weights_managed);
        }
        else
        {
            // Run weights reshaping and mark original weights tensor as unused
            _weights_reshaped.allocator()->allocate();
            _reshape_weights.run();
            _original_weights->mark_as_unused();
        }

        // Prepare GEMM
        _is_quantized ? _mm_gemmlowp.prepare() : _mm_gemm.prepare();
        if(!_weights_reshaped.is_used())
        {
            _weights_reshaped.allocator()->free();
        }

        CLScheduler::get().queue().finish();
        _is_prepared = true;
    }
}
} // namespace arm_compute
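
// Usage sketch (illustrative only, not part of this translation unit); the
// tensor shapes and PadStrideInfo values below are assumptions for the example:
//
//   CLScheduler::get().default_init();
//
//   CLTensor src, weights, biases, dst;
//   src.allocator()->init(TensorInfo(TensorShape(224U, 224U, 3U), 1, DataType::F32));
//   weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 32U), 1, DataType::F32));
//   biases.allocator()->init(TensorInfo(TensorShape(32U), 1, DataType::F32));
//   dst.allocator()->init(TensorInfo(TensorShape(224U, 224U, 32U), 1, DataType::F32));
//
//   CLGEMMConvolutionLayer conv;
//   conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1)); // stride 1x1, pad 1x1
//
//   src.allocator()->allocate();
//   weights.allocator()->allocate();
//   biases.allocator()->allocate();
//   dst.allocator()->allocate();
//   // ... fill src/weights/biases ...
//   conv.run(); // first call also triggers prepare()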