/*
 * Copyright (c) 2017-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"

#include <algorithm>
#include <cmath>

namespace arm_compute
{
using namespace arm_compute::misc::shape_calculator;

namespace
{
Status validate_mm(const ITensorInfo &input, const ITensorInfo &weights, const ITensorInfo &output)
{
    if(is_data_type_quantized_asymmetric(input.data_type()))
    {
        // Since we need negative offsets for the quantized matrix multiplication, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info(input.quantization_info().uniform().scale, -input.quantization_info().uniform().offset);
        const QuantizationInfo weights_quantization_info(weights.quantization_info().uniform().scale, -weights.quantization_info().uniform().offset);

        // Validate gemmlowp function
        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixMultiplyCore::validate(&input.clone()->set_quantization_info(input_quantization_info),
                                                                           &weights.clone()->set_quantization_info(weights_quantization_info),
                                                                           nullptr,
                                                                           &output));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMM::validate(&input, &weights, nullptr, &output, 1.f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */)));
    }

    return Status{};
}
} // namespace

void NEFullyConnectedLayerReshapeWeights::configure(const ITensor *input, ITensor *output)
{
    auto k = arm_compute::support::cpp14::make_unique<NETransposeKernel>();
    k->configure(input, output);
    _kernel = std::move(k);
}

Status NEFullyConnectedLayerReshapeWeights::validate(const ITensorInfo *input, const ITensorInfo *output)
{
    return NETransposeKernel::validate(input, output);
}

NEFullyConnectedLayer::NEFullyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
    : _memory_group(std::move(memory_manager)), _weights_manager(weights_manager), _flatten_kernel(), _convert_weights(), _convert_weights_managed(), _reshape_weights_function(),
      _reshape_weights_managed_function(), _mm_gemm(nullptr, weights_manager), _mm_gemmlowp(), _gemmlowp_output_stage(), _accumulate_biases_kernel(), _flatten_output(), _gemmlowp_output(),
      _converted_weights_output(), _reshape_weights_output(), _original_weights(nullptr), _are_weights_converted(true), _are_weights_reshaped(false), _is_fc_after_conv(false), _accumulate_biases(false),
      _is_quantized(false), _is_prepared(false)
{
}

void NEFullyConnectedLayer::configure_mm(const ITensor *input, const ITensor *weights, ITensor *output)
{
    if(_is_quantized)
    {
        // Since we need negative offsets for the quantized matrix multiplication, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.uniform().scale, -input_quantization_info.uniform().offset));
        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));

        // Configure gemmlowp function
        _mm_gemmlowp.configure(input, weights, nullptr, output);

        // Restore the original QuantizationInfo as input and weights could be used in other fully connected layers
        input->info()->set_quantization_info(input_quantization_info);
        weights->info()->set_quantization_info(weights_quantization_info);
    }
    else
    {
        // Configure matrix multiply kernel
        _mm_gemm.configure(input, weights, nullptr, output, 1.f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */));
    }
}

void NEFullyConnectedLayer::configure_conv_fc(const ITensor *input, const ITensor *weights, ITensor *output)
{
    ARM_COMPUTE_ERROR_ON((weights->info()->dimension(1) != (input->info()->dimension(0) * input->info()->dimension(1) * input->info()->dimension(2))));

    // If the fully connected layer is called after a convolution layer, the input tensor must be linearized

    // Initialize output tensor for flatten
    TensorShape shape_flatten = compute_flatten_shape(input->info());
    _flatten_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_flatten));

    // Configure flatten kernel
    _memory_group.manage(&_flatten_output);
    _flatten_kernel.configure(input, &_flatten_output);

    // Configure matrix multiply kernel
    configure_mm(&_flatten_output, weights, output);

    // Allocate the output tensor for flatten once all the configure methods have been called
    _flatten_output.allocator()->allocate();
}

void NEFullyConnectedLayer::configure_fc_fc(const ITensor *input, const ITensor *weights, ITensor *output)
{
    ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != weights->info()->dimension(1));

    // Configure matrix multiply kernel
    configure_mm(input, weights, output);
}

void NEFullyConnectedLayer::configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output,
                                      FullyConnectedLayerInfo fc_info)
{
    // Perform validate step
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(NEFullyConnectedLayer::validate(input->info(),
                                                               weights->info(),
                                                               biases != nullptr ? biases->info() : nullptr,
                                                               output->info(),
                                                               fc_info));

    _are_weights_converted = true;
    _are_weights_reshaped  = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    _is_fc_after_conv      = true;
    _accumulate_biases     = false;
    _is_quantized          = is_data_type_quantized_asymmetric(input->info()->data_type());
    _original_weights      = weights;

    if(_weights_manager)
    {
        _weights_manager->manage(weights);
    }

    // Configure gemmlowp output
    if(_is_quantized)
    {
        _gemmlowp_output.allocator()->init(output->info()->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));
    }

    // Configure accumulate biases kernel for non quantized asymmetric types
    if(biases != nullptr && !_is_quantized)
    {
        _accumulate_biases = true;

        // Configure accumulate biases kernel
        _accumulate_biases_kernel.configure(output, biases);
    }

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    const ITensor *weights_to_use = weights;

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->info()->dimension(1) > 1;
    if(is_batched_fc_layer)
    {
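        // In the batched case, the input comes from a convolution layer when its dimensions from
        // index 3 onwards match the output dimensions from index 1 onwards: the first three input
        // dimensions (width, height, channels) are flattened by the fully connected layer, so only
        // the batch dimensions are left to compare.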
        _is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->info()->tensor_shape().cbegin() + 3,
                                                                                  input->info()->tensor_shape().cend(),
                                                                                  output->info()->tensor_shape().cbegin() + 1));
    }
    else
    {
        _is_fc_after_conv = input->info()->num_dimensions() > 1;
    }

    // Reshape weights if needed
    if(!_are_weights_reshaped)
    {
        if(_weights_manager && _weights_manager->are_weights_managed(weights))
        {
            _reshape_weights_managed_function.configure(weights);
            weights_to_use = _weights_manager->acquire(weights, &_reshape_weights_managed_function);
        }
        else
        {
            // Reshape the weights
            _reshape_weights_function.configure(weights, &_reshape_weights_output);
            weights_to_use = &_reshape_weights_output;
        }
    }

    // Convert weights if needed
    if(_is_fc_after_conv && (input->info()->data_layout() != fc_info.weights_trained_layout))
    {
        if(_weights_manager && _weights_manager->are_weights_managed(weights_to_use))
        {
            _convert_weights_managed.configure(weights_to_use,
                                               input->info()->tensor_shape(),
                                               fc_info.weights_trained_layout);
            weights_to_use = _weights_manager->acquire(weights, &_convert_weights_managed);
        }
        else
        {
            // Convert weights
            _convert_weights.configure(weights_to_use,
                                       &_converted_weights_output,
                                       input->info()->tensor_shape(),
                                       fc_info.weights_trained_layout);

            weights_to_use = &_converted_weights_output;
        }
        _are_weights_converted = false;
    }

    ITensor *tmp_output = (_is_quantized) ? &_gemmlowp_output : output;
    if(_is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        configure_conv_fc(input, weights_to_use, tmp_output);
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        configure_fc_fc(input, weights_to_use, tmp_output);
    }

    // Configure output stage for asymmetric quantized types
    if(_is_quantized)
    {
        const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
        const UniformQuantizationInfo wq_info = weights->info()->quantization_info().uniform();
        const UniformQuantizationInfo oq_info = output->info()->quantization_info().uniform();

        float   multiplier = (iq_info.scale * wq_info.scale) / oq_info.scale;
        int32_t output_multiplier;
        int32_t output_shift;
        quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);

        GEMMLowpOutputStageInfo gemmlowp_output_stage_info;
        gemmlowp_output_stage_info.gemmlowp_multiplier = output_multiplier;
        gemmlowp_output_stage_info.gemmlowp_shift      = output_shift;
        gemmlowp_output_stage_info.gemmlowp_offset     = oq_info.offset;
        gemmlowp_output_stage_info.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
        _gemmlowp_output_stage.configure(&_gemmlowp_output, biases, output, gemmlowp_output_stage_info);
        _gemmlowp_output.allocator()->allocate();
    }

    _are_weights_reshaped = _are_weights_reshaped || fc_info.retain_internal_weights;
}

Status NEFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                       FullyConnectedLayerInfo fc_info)
{
    ARM_COMPUTE_UNUSED(fc_info.retain_internal_weights);
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);

    bool weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    bool is_fc_after_conv = true;
    bool is_quantized     = is_data_type_quantized_asymmetric(input->data_type());

    const ITensorInfo &flatten_input     = TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(input)));
    const ITensorInfo &reshaped_weights  = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights)));
    const ITensorInfo &converted_weights = weights_reshaped ? TensorInfo(weights->clone()->set_is_resizable(true).reset_padding()) : TensorInfo(*reshaped_weights.clone());
    const ITensorInfo &gemmlowp_output   = TensorInfo(output->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));

    // Configure accumulate biases kernel for non quantized asymmetric types
    if(biases != nullptr && !is_quantized)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMMatrixAccumulateBiasesKernel::validate(output, biases));
    }

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    const ITensorInfo *input_to_use   = input;
    const ITensorInfo *weights_to_use = weights;
    const ITensorInfo *tmp_output     = (is_quantized) ? &gemmlowp_output : output;

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->dimension(1) > 1;

    if(is_batched_fc_layer)
    {
        is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->tensor_shape().cbegin() + 3,
                                                                                 input->tensor_shape().cend(),
                                                                                 output->tensor_shape().cbegin() + 1));
    }
    else
    {
        is_fc_after_conv = input->num_dimensions() > 1;
    }

    if(!weights_reshaped)
    {
        // Validate reshape weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(NEFullyConnectedLayerReshapeWeights::validate(weights, &reshaped_weights));
        weights_to_use = &reshaped_weights;
    }

    if(is_fc_after_conv && (input->data_layout() != fc_info.weights_trained_layout))
    {
        // Validate convert weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(NEConvertFullyConnectedWeights::validate(weights_to_use,
                                                                             &converted_weights,
                                                                             input->tensor_shape(),
                                                                             fc_info.weights_trained_layout));
        weights_to_use = &converted_weights;
    }

    if(is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(1) != (input->dimension(0) * input->dimension(1) * input->dimension(2))));

        // Validate flatten kernel
        ARM_COMPUTE_RETURN_ON_ERROR(NEFlattenLayerKernel::validate(input, &flatten_input));
        input_to_use = &flatten_input;
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != weights_to_use->dimension(1));
    }
    // Validate matrix multiply kernel
    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(*input_to_use, *weights_to_use, *tmp_output));

    // Validate output stage for asymmetric quantized types
    if(is_quantized)
    {
        const UniformQuantizationInfo iq_info = input->quantization_info().uniform();
        const UniformQuantizationInfo wq_info = weights->quantization_info().uniform();
        const UniformQuantizationInfo oq_info = output->quantization_info().uniform();

        float   multiplier = (iq_info.scale * wq_info.scale) / oq_info.scale;
        int32_t output_multiplier;
        int32_t output_shift;
        ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));

        GEMMLowpOutputStageInfo gemmlowp_output_stage_info;
        gemmlowp_output_stage_info.gemmlowp_multiplier = output_multiplier;
        gemmlowp_output_stage_info.gemmlowp_shift      = output_shift;
        gemmlowp_output_stage_info.gemmlowp_offset     = oq_info.offset;
        gemmlowp_output_stage_info.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpOutputStage::validate(&gemmlowp_output, biases, output, gemmlowp_output_stage_info));
    }

    return Status{};
}

void NEFullyConnectedLayer::run()
{
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);

    // Linearize input if it comes from a convolutional layer
    if(_is_fc_after_conv)
    {
        NEScheduler::get().schedule(&_flatten_kernel, Window::DimY);
    }

    // Run matrix multiply
    if(_is_quantized)
    {
        _mm_gemmlowp.run();
    }
    else
    {
        _mm_gemm.run();
    }

    // Accumulate biases if provided
    if(_is_quantized)
    {
        _gemmlowp_output_stage.run();
    }
    else
    {
        if(_accumulate_biases)
        {
            NEScheduler::get().schedule(&_accumulate_biases_kernel, Window::DimY);
        }
    }
}

void NEFullyConnectedLayer::prepare()
{
    if(!_is_prepared)
    {
        if(!_weights_manager)
        {
            ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());
        }

        auto release_unused = [](Tensor * w)
        {
            if(!w->is_used())
            {
                w->allocator()->free();
            }
        };

        // Pointer to current weights
        const ITensor *cur_weights = _original_weights;

        // Reshape of the weights (happens only once)
        if(!_are_weights_reshaped)
        {
            if(_weights_manager && _weights_manager->are_weights_managed(_original_weights))
            {
                cur_weights = _weights_manager->run(cur_weights, &_reshape_weights_managed_function);
            }
            else
            {
                // Run reshape weights kernel and mark weights as unused
                _reshape_weights_output.allocator()->allocate();
                _reshape_weights_function.run();
                cur_weights->mark_as_unused();
                cur_weights = &_reshape_weights_output;
            }
            _are_weights_reshaped = true;
        }

        // Convert weights if needed (happens only once)
        if(!_are_weights_converted)
        {
            if(_weights_manager && _weights_manager->are_weights_managed(cur_weights))
            {
                _weights_manager->run(cur_weights, &_convert_weights_managed);
            }
            else
            {
                _converted_weights_output.allocator()->allocate();
                _convert_weights.run();
                cur_weights->mark_as_unused();
            }

            _are_weights_converted = true;
        }

        // Release reshaped weights if unused
        release_unused(&_reshape_weights_output);

        // Prepare GEMM and release unused weights
        if(!_is_quantized)
        {
            _mm_gemm.prepare();
        }

        // Release converted weights if unused
        release_unused(&_reshape_weights_output);
        release_unused(&_converted_weights_output);

        _is_prepared = true;
    }
}
} // namespace arm_compute