/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h"

#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"

#include <set>
#include <tuple>

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;

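// Illustrative usage sketch (comment only, not part of this translation unit): how a
// client would configure and run this function for a small FP32 convolution. The tensor
// shapes and PadStrideInfo values below are hypothetical; any combination accepted by
// NEGEMMConvolutionLayer::validate() follows the same pattern. Tensor comes from
// arm_compute/runtime/Tensor.h.
//
//   Tensor src{}, weights{}, biases{}, dst{};
//   src.allocator()->init(TensorInfo(TensorShape(32U, 32U, 16U), 1, DataType::F32));
//   weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 16U, 8U), 1, DataType::F32));
//   biases.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::F32));
//   dst.allocator()->init(TensorInfo(TensorShape(32U, 32U, 8U), 1, DataType::F32));
//
//   NEGEMMConvolutionLayer conv{};
//   conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1 /* stride_x */, 1 /* stride_y */, 1 /* pad_x */, 1 /* pad_y */));
//
//   src.allocator()->allocate();
//   weights.allocator()->allocate();
//   biases.allocator()->allocate();
//   dst.allocator()->allocate();
//   // ... fill src, weights and biases ...
//   conv.run(); // the first run() calls prepare(), which reshapes the weights once
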
NEConvolutionLayerReshapeWeights::NEConvolutionLayerReshapeWeights()
    : _weights_reshape_kernel()
{
}

void NEConvolutionLayerReshapeWeights::configure(const ITensor *weights, const ITensor *biases, ITensor *output)
{
    // Perform validation step
    ARM_COMPUTE_ERROR_ON_NULLPTR(weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(NEConvolutionLayerReshapeWeights::validate(weights->info(),
                                                                          (biases != nullptr) ? biases->info() : nullptr,
                                                                          output->info()));

    const bool     append_biases = (biases != nullptr) && !is_data_type_quantized_asymmetric(weights->info()->data_type());
    const ITensor *biases_to_use = (append_biases) ? biases : nullptr;

    _weights_reshape_kernel.configure(weights, biases_to_use, output);

    output->info()->set_quantization_info(weights->info()->quantization_info());
}

Status NEConvolutionLayerReshapeWeights::validate(const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(weights);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1,
                                                         DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL,
                                                         DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    if(biases != nullptr)
    {
        const int idx_kernels = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::BATCHES);
        ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_asymmetric(weights->data_type()));
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    if((output != nullptr) && (output->total_size() != 0))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, output);

        // Propagate the kernel's validation status instead of discarding it
        ARM_COMPUTE_RETURN_ON_ERROR(NEWeightsReshapeKernel::validate(weights, biases, output));
    }

    return Status{};
}

void NEConvolutionLayerReshapeWeights::run()
{
    // Split the execution window along dimension 3, i.e. across the kernels
    NEScheduler::get().schedule(&_weights_reshape_kernel, 3);
}

NEGEMMConvolutionLayer::NEGEMMConvolutionLayer(const std::shared_ptr<IMemoryManager> &memory_manager, IWeightsManager *weights_manager)
    : _memory_group(memory_manager), _weights_manager(weights_manager), _reshape_weights(), _reshape_weights_managed(), _im2col_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager),
      _col2im_kernel(), _reshape_layer(), _original_weights(nullptr), _im2col_output(), _weights_reshaped(), _gemm_output(), _tmp_output(), _data_layout(DataLayout::NCHW), _skip_im2col(false),
      _skip_col2im(false), _is_quantized(false), _is_prepared(false)
{
}

void NEGEMMConvolutionLayer::configure_mm(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const ActivationLayerInfo &act_info, int gemm_3d_depth)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
    ARM_COMPUTE_ERROR_THROW_ON(validate_mm(input->info(), weights->info(), biases == nullptr ? nullptr : biases->info(), output == nullptr ? nullptr : output->info(),
                                           act_info, gemm_3d_depth, _skip_im2col));

    // Create GEMMInfo structure
    const GEMMInfo &gemm_info = GEMMInfo(false, false, true /* Reshape weights only for the first run */,
                                         gemm_3d_depth, _skip_im2col /* Reinterpret the input as 3D if im2col is skipped */,
                                         false, GEMMLowpOutputStageInfo(), false, false, act_info);

    // Supported activations in GEMM
    const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
                                                                               ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
                                                                               ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
                                                                             };

Isabella Gottardi6acc6ad2018-02-02 17:19:18 +0000115 if(_is_quantized)
116 {
117 // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
118 // Extract and negate input and weights offset
Georgios Pinitas6e1791b2019-12-02 19:01:25 +0000119 const QuantizationInfo iqinfo = input->info()->quantization_info();
120 const QuantizationInfo wqinfo = weights->info()->quantization_info();
121 const QuantizationInfo oqinfo = (output->info()->total_size() == 0) ? iqinfo : output->info()->quantization_info();
122 const UniformQuantizationInfo uiqinfo = iqinfo.uniform();
123 const UniformQuantizationInfo uoqinfo = oqinfo.uniform();
124 const DataType data_type = input->info()->data_type();
Isabella Gottardi6acc6ad2018-02-02 17:19:18 +0000125
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100126 input->info()->set_quantization_info(QuantizationInfo(uiqinfo.scale, -uiqinfo.offset));
127 if(!is_data_type_quantized_per_channel(weights->info()->data_type()))
128 {
129 const UniformQuantizationInfo uwqinfo = wqinfo.uniform();
130 weights->info()->set_quantization_info(QuantizationInfo(uwqinfo.scale, -uwqinfo.offset));
131 }
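
        // Why negation is enough (sketch of the identity): with zero points z_i (input)
        // and z_w (weights), each output accumulator is
        //   acc = sum_k (q_i[k] - z_i) * (q_w[k] - z_w)
        //       = sum_k q_i[k] * q_w[k] - z_i * sum_k q_w[k] - z_w * sum_k q_i[k] + K * z_i * z_w
        // The GEMMLowp core applies its offset contributions additively, so storing -z_i
        // and -z_w in the tensors' QuantizationInfo (as done above) makes those additions
        // realize the subtractions. The contribution kernels' sign convention is assumed
        // here; the algebraic expansion itself is exact.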

        // Merge activation with output stage
        PixelValue type_min = 0;
        PixelValue type_max = 0;
        std::tie(type_min, type_max) = get_min_max(data_type);
        int min_activation = type_min.get<int>();
        int max_activation = type_max.get<int>();

        if(act_info.enabled() && supported_acts.count(act_info.activation()) != 0)
        {
            const bool is_quantized_signed = is_data_type_quantized_asymmetric_signed(data_type);
            const int  a_const_int         = is_quantized_signed ? quantize_qasymm8_signed(act_info.a(), uoqinfo) : quantize_qasymm8(act_info.a(), uoqinfo);
            const int  b_const_int         = is_quantized_signed ? quantize_qasymm8_signed(act_info.b(), uoqinfo) : quantize_qasymm8(act_info.b(), uoqinfo);

            min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? uoqinfo.offset : b_const_int;
            max_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? max_activation : a_const_int;
        }

        GEMMLowpOutputStageInfo output_info;
        output_info.type                     = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
        output_info.gemmlowp_offset          = uoqinfo.offset;
        output_info.gemmlowp_min_bound       = min_activation;
        output_info.gemmlowp_max_bound       = max_activation;
        output_info.is_quantized_per_channel = (weights->info()->data_type() == DataType::QSYMM8_PER_CHANNEL);
        ARM_COMPUTE_ERROR_THROW_ON(quantization::calculate_quantized_multipliers_less_than_one(iqinfo, wqinfo, oqinfo, output_info));
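
        // Requantization done by the QUANTIZE_DOWN_FIXEDPOINT stage, in sketch form: the S32
        // accumulator is rescaled by M = (scale_input * scale_weights) / scale_output, encoded
        // as a normalized fixed-point multiplier m and right shift n with M = m * 2^-n, m in [0.5, 1):
        //   q_out = clamp(round(acc * M) + gemmlowp_offset, gemmlowp_min_bound, gemmlowp_max_bound)
        // For QSYMM8_PER_CHANNEL weights one (m, n) pair is computed per output channel.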

        _mm_gemmlowp.configure(input, weights, biases, output, GEMMInfo(false, false, true, gemm_3d_depth, _skip_im2col, false, output_info));

        // Restore the original QuantizationInfo, as the input and weights could be used by other convolution layers
        input->info()->set_quantization_info(iqinfo);
        weights->info()->set_quantization_info(wqinfo);
    }
    else
    {
        // Configure matrix multiply function
        _mm_gemm.configure(input, weights, biases, output, 1.0f, 0.0f, gemm_info);
    }
}

Status NEGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                           const ActivationLayerInfo &act_info, int gemm_3d_depth, bool skip_im2col)
{
    const DataType data_type             = input->data_type();
    const bool     is_quantized          = is_data_type_quantized_asymmetric(data_type);
    const bool     is_activation_enabled = act_info.enabled();

    // Create GEMMInfo structure
    const GEMMInfo gemm_info = GEMMInfo(false, false, true /* Reshape weights only for the first run */,
                                        gemm_3d_depth, skip_im2col /* Reinterpret the input as 3D if im2col is skipped */,
                                        false, GEMMLowpOutputStageInfo(), false, false, act_info);

    if(is_quantized)
    {
        // Convolution needs negated offsets, so extract the quantization info and
        // negate the input and weights offsets before handing them to GEMMLowp
        const QuantizationInfo &iqinfo = input->quantization_info();
        const QuantizationInfo &wqinfo = weights->quantization_info();
        const QuantizationInfo &oqinfo = (output->total_size() == 0) ? iqinfo : output->quantization_info();
        const UniformQuantizationInfo uoqinfo = oqinfo.uniform();

        // Merge activation with output stage
        PixelValue type_min = 0;
        PixelValue type_max = 0;
        std::tie(type_min, type_max) = get_min_max(data_type);
        int min_activation = type_min.get<int>();
        int max_activation = type_max.get<int>();

        const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
                                                                                 };
        if(is_activation_enabled && supported_acts.count(act_info.activation()) != 0)
        {
            const bool is_quantized_signed = is_data_type_quantized_asymmetric_signed(data_type);
            const int  a_const_int         = is_quantized_signed ? quantize_qasymm8_signed(act_info.a(), uoqinfo) : quantize_qasymm8(act_info.a(), uoqinfo);
            const int  b_const_int         = is_quantized_signed ? quantize_qasymm8_signed(act_info.b(), uoqinfo) : quantize_qasymm8(act_info.b(), uoqinfo);

            min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? uoqinfo.offset : b_const_int;
            max_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? max_activation : a_const_int;
        }

        GEMMLowpOutputStageInfo output_info;
        output_info.type                     = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
        output_info.gemmlowp_offset          = uoqinfo.offset;
        output_info.gemmlowp_min_bound       = min_activation;
        output_info.gemmlowp_max_bound       = max_activation;
        output_info.is_quantized_per_channel = (weights->data_type() == DataType::QSYMM8_PER_CHANNEL);
        ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multipliers_less_than_one(iqinfo, wqinfo, oqinfo, output_info));

        // Perform validation step on GEMMLowp
        std::unique_ptr<ITensorInfo> input_qa   = input->clone();
        std::unique_ptr<ITensorInfo> weights_qa = weights->clone();
        input_qa->set_quantization_info(QuantizationInfo(iqinfo.uniform().scale, -iqinfo.uniform().offset));
        weights_qa->set_quantization_info(QuantizationInfo(wqinfo.uniform().scale, -wqinfo.uniform().offset));
        return NEGEMMLowpMatrixMultiplyCore::validate(input_qa.get(), weights_qa.get(), biases, output, GEMMInfo(false, false, true, gemm_3d_depth, skip_im2col, false, output_info));
    }
    else
    {
        // Perform validation step on Matrix multiply function
        return NEGEMM::validate(input, weights, nullptr, output, 1.0f, 0.0f, gemm_info);
    }
}

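// Probes whether the GEMM path can reinterpret its output (or its input, when im2col is
// skipped) as a 3D tensor of depth gemm_3d_depth. Only the layout/depth combination is
// being asked about, hence the small dummy 4x4 shapes; the real check is delegated to
// validate_mm().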
Status NEGEMMConvolutionLayer::validate_gemm3d(const ITensorInfo *input_info, const ITensorInfo *weights_info, const ActivationLayerInfo &act_info, int gemm_3d_depth, bool skip_im2col)
{
    const DataType     data_type = input_info->data_type();
    const unsigned int mult_y    = skip_im2col ? 1U : gemm_3d_depth;
    const unsigned int mult_z    = skip_im2col ? gemm_3d_depth : 1U;

    // Set dummy tensor shapes for the validation
    const TensorInfo dummy_input_info(TensorShape(4U, 4U * mult_y, 1U * mult_z), 1, data_type, input_info->quantization_info());
    const TensorInfo dummy_weights_info(TensorShape(4U, 4U), 1, data_type, weights_info->quantization_info());
    const TensorInfo dummy_output_info(TensorShape(4U, 4U, gemm_3d_depth), 1, data_type, input_info->quantization_info());

    return validate_mm(&dummy_input_info, &dummy_weights_info, nullptr, &dummy_output_info, act_info, gemm_3d_depth, skip_im2col);
}

void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
                                       const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_UNUSED(num_groups, weights_info);
    ARM_COMPUTE_ERROR_THROW_ON(NEGEMMConvolutionLayer::validate(input->info(),
                                                                weights->info(),
                                                                biases != nullptr ? biases->info() : nullptr,
                                                                output->info(),
                                                                conv_info,
                                                                weights_info,
                                                                dilation,
                                                                act_info,
                                                                num_groups));

    const DataType   data_type   = input->info()->data_type();
    const DataLayout data_layout = input->info()->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->info()->dimension(idx_width);
    const unsigned int kernel_height = weights->info()->dimension(idx_height);

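    // Note on _skip_im2col below: for an NHWC 1x1 kernel with unit strides, every output
    // location reads exactly one input pixel, so the im2col lowering would be an identity
    // copy and the input can feed the GEMM directly.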
    _is_prepared      = weights_info.retain_internal_weights();
    _original_weights = weights;
    _is_quantized     = is_data_type_quantized_asymmetric(input->info()->data_type());
    _data_layout      = data_layout;
    _skip_im2col      = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);

    const ITensor *gemm_input_to_use  = input;
    ITensor       *gemm_output_to_use = output;

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;
    std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(idx_width),
                                                 input->info()->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);

    // Check if GEMM3D is supported
    if(data_layout == DataLayout::NHWC)
    {
        _skip_col2im = bool(validate_gemm3d(input->info(), weights->info(), act_info, conv_h, true));
        // If not supported, we need to perform im2col and col2im (or reshape layer)
        if(!_skip_col2im)
        {
            _skip_im2col = false;
        }
    }
    else
    {
        _skip_col2im = false;
    }

    // Get parameters from conv_info
    unsigned int stride_x = 0;
    unsigned int stride_y = 0;
    std::tie(stride_x, stride_y) = conv_info.stride();

    unsigned int mat_weights_cols = weights->info()->dimension(idx_kernels);

    // _weights_reshaped will be auto-configured in the kernel.
    // Biases are not appended here (nullptr below), as they are handled by the GEMM stage,
    // and no 1xW transpose is done either, since the weights are reshaped inside NEGEMM.
    const ITensor *weights_to_use = weights;

    if(_weights_manager && _weights_manager->are_weights_managed(weights))
    {
        _reshape_weights_managed.configure(weights, nullptr);
        weights_to_use = _weights_manager->acquire(weights, &_reshape_weights_managed);
    }
    else
    {
        _reshape_weights.configure(weights, nullptr, &_weights_reshaped);
        weights_to_use = &_weights_reshaped;
    }

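    // im2col lowers the convolution to a single matrix multiply: dimension 0 of the lowered
    // input holds one kernel-sized patch (kernel_width * kernel_height * input channels) and
    // dimension 1 indexes the conv_w * conv_h output locations, so each GEMM output row
    // corresponds to one output pixel.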
    // Create tensor to store im2col reshaped inputs
    if(!_skip_im2col)
    {
        _memory_group.manage(&_im2col_output);

        // Configure im2col kernel
        _im2col_kernel.configure(input, &_im2col_output, Size2D(kernel_width, kernel_height), conv_info, false, dilation);

        // Update GEMM input
        gemm_input_to_use = &_im2col_output;
    }

    // Create temporary GEMM output tensor in case we cannot skip col2im
    if(!_skip_col2im)
    {
        TensorShape shape_gemm;

        // Calculate GEMM output shape
        shape_gemm = _im2col_output.info()->tensor_shape();
        shape_gemm.set(0, mat_weights_cols);
        shape_gemm.set(1, conv_w * conv_h);

        // FIXME: input->clone() doesn't work with subtensors for grouped convolutions.
        TensorInfo info_gemm(shape_gemm, 1, data_type);
        info_gemm.set_quantization_info(output->info()->quantization_info()).set_data_layout(input->info()->data_layout());
        _gemm_output.allocator()->init(info_gemm);
        _memory_group.manage(&_gemm_output);

        // Update GEMM output
        gemm_output_to_use = &_gemm_output;
    }

    // Configure GEMM
    // In case we need to skip col2im, GEMM3D (gemm_3d_depth != 0) must be called in order to avoid reshaping the output matrix
    const unsigned int gemm_3d_depth = _skip_col2im ? conv_h : 0;
    configure_mm(gemm_input_to_use, weights_to_use, biases, gemm_output_to_use, act_info, gemm_3d_depth);

    if(!_skip_im2col)
    {
        _im2col_output.allocator()->allocate();
    }

    if(!_skip_col2im)
    {
        if(_data_layout == DataLayout::NCHW)
        {
            // Configure col2im
            _col2im_kernel.configure(gemm_output_to_use, output, Size2D(conv_w, conv_h));
        }
        else
        {
            // Configure reshape layer
            _reshape_layer.configure(gemm_output_to_use, output);
        }
    }

    if(_is_quantized && !_skip_col2im)
    {
        _tmp_output.allocator()->allocate();
    }

    if(!_skip_col2im || _is_quantized)
    {
        _gemm_output.allocator()->allocate();
    }

    ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(idx_width) != conv_w) || (output->info()->dimension(idx_height) != conv_h),
                             "Output shape does not match the expected one");
}

Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                        const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!");
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups > 1, "Grouping (num_groups != 1) is not supported on NEON");

    const DataLayout data_layout = input->data_layout();
    const DataType   data_type   = input->data_type();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->dimension(idx_width);
    const unsigned int kernel_height = weights->dimension(idx_height);

    TensorInfo         im2col_reshaped_info{};
    TensorInfo         info_gemm{};
    TensorInfo         tmp_info{};
    TensorInfo         weights_reshaped_info{};
    const ITensorInfo *gemm_input_to_use  = input;
    const ITensorInfo *gemm_output_to_use = output;
    const ITensorInfo *weights_to_use     = weights;

    const bool append_bias  = false;
    const bool is_quantized = is_data_type_quantized_asymmetric(data_type);
    bool       skip_im2col  = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;

    std::tie(conv_w, conv_h) = scaled_dimensions(input->dimension(idx_width),
                                                 input->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);

    // Check if GEMM3D is supported
    bool skip_col2im = false;
    if(data_layout == DataLayout::NHWC)
    {
        skip_col2im = bool(validate_gemm3d(input, weights, act_info, conv_h, true));
        // If not supported, we need to perform im2col and col2im (or reshape layer)
        if(!skip_col2im)
        {
            skip_im2col = false;
        }
    }

    if(skip_col2im)
    {
        // Re-check with the actual skip_im2col decision, as the first probe assumed im2col would be skipped
        if(!bool(validate_gemm3d(input, weights, act_info, conv_h, skip_im2col)))
        {
            skip_im2col = false;
            skip_col2im = false;
        }
    }

    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_channel) != input->dimension(idx_channel));
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    // Validate biases
    if(biases != nullptr)
    {
        if(is_quantized)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        }
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    unsigned int mat_weights_cols = weights->dimension(idx_kernels);
    unsigned int mat_weights_rows = weights->dimension(idx_width) * weights->dimension(idx_height) * weights->dimension(idx_channel);

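    // In matrix terms: the lowered input is (conv_w * conv_h) x mat_weights_rows and the
    // reshaped weights are mat_weights_rows x mat_weights_cols, so the product yields
    // mat_weights_cols output channels for each of the conv_w * conv_h output locations.
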
    // Validate the weights reshape and describe the reshaped weights tensor
    ARM_COMPUTE_RETURN_ON_ERROR(NEConvolutionLayerReshapeWeights::validate(weights, nullptr, nullptr));
    weights_reshaped_info = TensorInfo(compute_weights_reshaped_shape(*weights, append_bias), 1, data_type);
    weights_reshaped_info.set_quantization_info(weights->quantization_info());
    weights_to_use = &weights_reshaped_info;

    if(!skip_im2col)
    {
        // Create tensor info for im2col reshaped inputs
        // For NEON the batch size is on the fourth dimension
        // TODO (giaiod01): Auto-initialize the output shape of im2col COMPMID-1482
        TensorShape shape_im2col = input->tensor_shape();
        shape_im2col.set(0, mat_weights_rows);
        shape_im2col.set(1, conv_w * conv_h);
        shape_im2col.set(2, 1);

        im2col_reshaped_info = TensorInfo(shape_im2col, 1, data_type);
        im2col_reshaped_info.set_quantization_info(input->quantization_info());

        ARM_COMPUTE_RETURN_ON_ERROR(NEIm2ColKernel::validate(input, &im2col_reshaped_info, Size2D(kernel_width, kernel_height), conv_info, append_bias, dilation));
        gemm_input_to_use = &im2col_reshaped_info;
    }

    // Create temporary GEMM output tensor in case we cannot skip col2im
    if(!skip_col2im)
    {
        TensorShape shape_gemm = gemm_input_to_use->tensor_shape();
        shape_gemm.set(0, mat_weights_cols);
        shape_gemm.set(1, conv_w * conv_h);
        info_gemm = TensorInfo(shape_gemm, 1, data_type);
    }
    else
    {
        info_gemm = TensorInfo(output->tensor_shape(), 1, data_type);
    }
    info_gemm.set_quantization_info(output->quantization_info()).set_data_layout(input->data_layout());
    gemm_output_to_use = &info_gemm;
    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemm_input_to_use, weights_to_use, biases, gemm_output_to_use, act_info, skip_col2im ? conv_h : 0, skip_im2col));

    // Validate Col2Im/ReshapeLayer
    if(!skip_col2im && (data_layout == DataLayout::NCHW))
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NECol2ImKernel::validate(gemm_output_to_use, output, Size2D(conv_w, conv_h)));
    }

    return Status{};
}

void NEGEMMConvolutionLayer::run()
{
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);

    if(!_skip_im2col)
    {
        // Run input reshaping
        unsigned int y_dim = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
        NEScheduler::get().schedule(&_im2col_kernel, y_dim);
    }

    // Run NEGEMM or NEGEMMLowpMatrixMultiplyCore
    if(_is_quantized)
    {
        // Run gemmlowp
        _mm_gemmlowp.run();
    }
    else
    {
        // Run gemm
        _mm_gemm.run();
    }

    // Reshape output matrix
    if(!_skip_col2im)
    {
        if(_data_layout == DataLayout::NCHW)
        {
            NEScheduler::get().schedule(&_col2im_kernel, Window::DimY);
        }
        else
        {
            _reshape_layer.run();
        }
    }
}

void NEGEMMConvolutionLayer::prepare()
{
    if(!_is_prepared)
    {
        if(_weights_manager && _weights_manager->are_weights_managed(_original_weights))
        {
            _weights_manager->run(_original_weights, &_reshape_weights_managed);
        }
        else
        {
            // Run weights reshaping and mark original weights tensor as unused
            _weights_reshaped.allocator()->allocate();
            _reshape_weights.run();
            _original_weights->mark_as_unused();
        }

        // Prepare GEMM
        _is_quantized ? _mm_gemmlowp.prepare() : _mm_gemm.prepare();
        if(!_weights_reshaped.is_used())
        {
            _weights_reshaped.allocator()->free();
        }

        _is_prepared = true;
    }
}