/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"

#include <algorithm>
#include <cmath>

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;

namespace
{
Status validate_mm(const ITensorInfo &input, const ITensorInfo &weights, const ITensorInfo &output)
{
    if(is_data_type_quantized_asymmetric(input.data_type()))
    {
        // Since we need negative offsets for the low-precision matrix multiplication, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
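        //
        // Background sketch (assuming the usual asymmetric model real = scale * (q - offset)):
        //   real_out = Sum_k in_scale * (in_q[k] - in_off) * w_scale * (w_q[k] - w_off)
        // NEGEMMLowpMatrixMultiplyCore consumes the offsets stored in the tensors' QuantizationInfo;
        // negating them on the cloned TensorInfos (the originals are left untouched) gives the sign
        // convention the core appears to expect for its offset-contribution terms.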
        const QuantizationInfo input_quantization_info(input.quantization_info().uniform().scale, -input.quantization_info().uniform().offset);
        const QuantizationInfo weights_quantization_info(weights.quantization_info().uniform().scale, -weights.quantization_info().uniform().offset);

        // Validate gemmlowp function
        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixMultiplyCore::validate(&input.clone()->set_quantization_info(input_quantization_info),
                                                                           &weights.clone()->set_quantization_info(weights_quantization_info),
                                                                           nullptr,
                                                                           &output));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMM::validate(&input, &weights, nullptr, &output, 1.f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */)));
    }

    return Status{};
}
} // namespace

void NEFullyConnectedLayerReshapeWeights::configure(const ITensor *input, ITensor *output)
{
    auto k = arm_compute::support::cpp14::make_unique<NETransposeKernel>();
    k->configure(input, output);
    _kernel = std::move(k);
}

Status NEFullyConnectedLayerReshapeWeights::validate(const ITensorInfo *input, const ITensorInfo *output)
{
    return NETransposeKernel::validate(input, output);
}

NEFullyConnectedLayer::NEFullyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)), _flatten_kernel(), _convert_weights(), _reshape_weights_function(), _mm_gemm(), _mm_gemmlowp(), _gemmlowp_output_stage(), _accumulate_biases_kernel(),
      _flatten_output(), _gemmlowp_output(), _converted_weights_output(), _reshape_weights_output(), _original_weights(nullptr), _are_weights_converted(true), _are_weights_reshaped(false),
      _is_fc_after_conv(false), _accumulate_biases(false), _is_quantized(false), _is_prepared(false)
{
}

void NEFullyConnectedLayer::configure_mm(const ITensor *input, const ITensor *weights, ITensor *output)
{
    if(_is_quantized)
    {
        // Since we need negative offsets for the low-precision matrix multiplication, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.uniform().scale, -input_quantization_info.uniform().offset));
        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));

        // Configure gemmlowp function
        _mm_gemmlowp.configure(input, weights, nullptr, output);

        // Restore the original QuantizationInfo as input and weights could be used in other fully connected layers
        input->info()->set_quantization_info(input_quantization_info);
        weights->info()->set_quantization_info(weights_quantization_info);
    }
    else
    {
        // Configure matrix multiply kernel
        _mm_gemm.configure(input, weights, nullptr, output, 1.f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */));
    }
}

void NEFullyConnectedLayer::configure_conv_fc(const ITensor *input, const ITensor *weights, ITensor *output)
{
    ARM_COMPUTE_ERROR_ON((weights->info()->dimension(1) != (input->info()->dimension(0) * input->info()->dimension(1) * input->info()->dimension(2))));

    // If the fully connected layer is called after a convolution layer, the input tensor must be linearized

    // Initialize output tensor for flatten
    TensorShape shape_flatten = compute_flatten_shape(input->info());
    _flatten_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_flatten));

    // Configure flatten kernel
    _memory_group.manage(&_flatten_output);
    _flatten_kernel.configure(input, &_flatten_output);

    // Configure matrix multiply kernel
    configure_mm(&_flatten_output, weights, output);

    // Allocate the output tensor for flatten once all the configure methods have been called
    _flatten_output.allocator()->allocate();
}

void NEFullyConnectedLayer::configure_fc_fc(const ITensor *input, const ITensor *weights, ITensor *output)
{
    ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != weights->info()->dimension(1));

    // Configure matrix multiply kernel
    configure_mm(input, weights, output);
}

void NEFullyConnectedLayer::configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output,
                                      FullyConnectedLayerInfo fc_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

    // Perform validate step
    ARM_COMPUTE_ERROR_THROW_ON(NEFullyConnectedLayer::validate(input->info(),
                                                               weights->info(),
                                                               biases != nullptr ? biases->info() : nullptr,
                                                               output->info(),
                                                               fc_info));

    _are_weights_converted = true;
    _are_weights_reshaped  = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    _is_fc_after_conv      = true;
    _accumulate_biases     = false;
    _is_quantized          = is_data_type_quantized_asymmetric(input->info()->data_type());
    _original_weights      = weights;

    // Configure gemmlowp output
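    // (For the quantized path the low-precision GEMM accumulates into a temporary S32 tensor; the
    // output stage configured further below requantizes it to the final QASYMM8 output.)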
    if(_is_quantized)
    {
        _gemmlowp_output.allocator()->init(output->info()->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));
    }

    // Configure accumulate biases kernel for non quantized asymmetric types
    if(biases != nullptr && !_is_quantized)
    {
        _accumulate_biases = true;

        // Configure accumulate biases kernel
        _accumulate_biases_kernel.configure(output, biases);
    }

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    const ITensor *weights_to_use = weights;

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->info()->dimension(1) > 1;
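    // Heuristic to distinguish cases 1)/3) from 2)/4): in the batched case the input is treated as the
    // output of a convolution when its dimensions from index 3 onwards match the output's dimensions
    // from index 1 onwards (i.e. only the batch dimensions line up); in the non-batched case any input
    // with more than one dimension is assumed to come from a convolution.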
    if(is_batched_fc_layer)
    {
        _is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->info()->tensor_shape().cbegin() + 3,
                                                                                  input->info()->tensor_shape().cend(),
                                                                                  output->info()->tensor_shape().cbegin() + 1));
    }
    else
    {
        _is_fc_after_conv = input->info()->num_dimensions() > 1;
    }

    // Reshape weights if needed
    if(!_are_weights_reshaped)
    {
        // Reshape the weights
        _reshape_weights_function.configure(weights, &_reshape_weights_output);
        weights_to_use = &_reshape_weights_output;
    }

    // Convert weights if needed
    if(_is_fc_after_conv && (input->info()->data_layout() != fc_info.weights_trained_layout))
    {
        // Convert weights
        _convert_weights.configure(weights_to_use,
                                   &_converted_weights_output,
                                   input->info()->tensor_shape(),
                                   fc_info.weights_trained_layout);

        weights_to_use         = &_converted_weights_output;
        _are_weights_converted = false;
    }

    ITensor *tmp_output = (_is_quantized) ? &_gemmlowp_output : output;
    if(_is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        configure_conv_fc(input, weights_to_use, tmp_output);
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        configure_fc_fc(input, weights_to_use, tmp_output);
    }

    // Configure output stage for asymmetric quantized types
    if(_is_quantized)
    {
        const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
        const UniformQuantizationInfo wq_info = weights->info()->quantization_info().uniform();
        const UniformQuantizationInfo oq_info = output->info()->quantization_info().uniform();

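        // Requantization sketch: the S32 accumulator represents real values at scale (iq.scale * wq.scale),
        // so mapping it onto the output's quantization requires multiplying by
        //   multiplier = (iq.scale * wq.scale) / oq.scale
        // calculate_quantized_multiplier_less_than_one() expresses this real multiplier (expected to be < 1)
        // as a fixed-point integer multiplier plus a right shift, which the fixed-point output stage applies.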
        float multiplier = (iq_info.scale * wq_info.scale) / oq_info.scale;
        int   output_multiplier;
        int   output_shift;
        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
        _gemmlowp_output_stage.configure(&_gemmlowp_output, biases, output, output_multiplier, output_shift, oq_info.offset);
        _gemmlowp_output.allocator()->allocate();
    }

    _are_weights_reshaped = _are_weights_reshaped || fc_info.retain_internal_weights;
}
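// Minimal usage sketch (illustrative only, not part of this translation unit; the tensor names and the
// 128-input / 10-output example shapes are hypothetical):
//
//   Tensor src, weights, bias, dst;
//   src.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::F32));
//   weights.allocator()->init(TensorInfo(TensorShape(128U, 10U), 1, DataType::F32));
//   bias.allocator()->init(TensorInfo(TensorShape(10U), 1, DataType::F32));
//   dst.allocator()->init(TensorInfo(TensorShape(10U), 1, DataType::F32));
//
//   NEFullyConnectedLayer fc;
//   fc.configure(&src, &weights, &bias, &dst, FullyConnectedLayerInfo());
//
//   src.allocator()->allocate();
//   weights.allocator()->allocate();
//   bias.allocator()->allocate();
//   dst.allocator()->allocate();
//   // ... fill src, weights and bias ...
//   fc.run();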

Status NEFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                       FullyConnectedLayerInfo fc_info)
{
    ARM_COMPUTE_UNUSED(fc_info.retain_internal_weights);
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);

    bool weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    bool is_fc_after_conv = true;
    bool is_quantized     = is_data_type_quantized_asymmetric(input->data_type());

    const ITensorInfo &flatten_input     = TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(input)));
    const ITensorInfo &reshaped_weights  = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights)));
    const ITensorInfo &converted_weights = weights_reshaped ? TensorInfo(weights->clone()->set_is_resizable(true).reset_padding()) : TensorInfo(*reshaped_weights.clone());
    const ITensorInfo &gemmlowp_output   = TensorInfo(output->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));

    // Configure accumulate biases kernel for non quantized asymmetric types
    if(biases != nullptr && !is_quantized)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMMatrixAccumulateBiasesKernel::validate(output, biases));
    }

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    const ITensorInfo *input_to_use   = input;
    const ITensorInfo *weights_to_use = weights;
    const ITensorInfo *tmp_output     = (is_quantized) ? &gemmlowp_output : output;

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->dimension(1) > 1;

    if(is_batched_fc_layer)
    {
        is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->tensor_shape().cbegin() + 3,
                                                                                 input->tensor_shape().cend(),
                                                                                 output->tensor_shape().cbegin() + 1));
    }
    else
    {
        is_fc_after_conv = input->num_dimensions() > 1;
    }

    if(!weights_reshaped)
    {
        // Validate reshape weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(NEFullyConnectedLayerReshapeWeights::validate(weights, &reshaped_weights));
        weights_to_use = &reshaped_weights;
    }

    if(is_fc_after_conv && (input->data_layout() != fc_info.weights_trained_layout))
    {
        // Validate convert weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(NEConvertFullyConnectedWeights::validate(weights_to_use,
                                                                             &converted_weights,
                                                                             input->tensor_shape(),
                                                                             fc_info.weights_trained_layout));
        weights_to_use = &converted_weights;
    }

    if(is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(1) != (input->dimension(0) * input->dimension(1) * input->dimension(2))));

        // Validate flatten kernel
        ARM_COMPUTE_RETURN_ON_ERROR(NEFlattenLayerKernel::validate(input, &flatten_input));
        input_to_use = &flatten_input;
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != weights_to_use->dimension(1));
    }
    // Validate matrix multiply kernel
    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(*input_to_use, *weights_to_use, *tmp_output));

    // Validate output stage for asymmetric quantized types
    if(is_quantized)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(&gemmlowp_output, biases, output));
    }

    return Status{};
}

void NEFullyConnectedLayer::run()
{
    prepare();

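    // Acquire the memory managed by _memory_group for the duration of run(); it is released again when
    // the scope object below is destroyed at the end of this function.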
    MemoryGroupResourceScope scope_mg(_memory_group);

    // Linearize input if it comes from a convolutional layer
    if(_is_fc_after_conv)
    {
        NEScheduler::get().schedule(&_flatten_kernel, Window::DimY);
    }

    // Run matrix multiply
    if(_is_quantized)
    {
        _mm_gemmlowp.run();
    }
    else
    {
        _mm_gemm.run();
    }

    // Accumulate biases if provided
    if(_is_quantized)
    {
        _gemmlowp_output_stage.run();
    }
    else
    {
        if(_accumulate_biases)
        {
            NEScheduler::get().schedule(&_accumulate_biases_kernel, Window::DimY);
        }
    }
}

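// One-time weight preparation: the original weights are (optionally) transposed by the reshape function,
// then (optionally) converted to match the layout the weights were trained in, and each intermediate
// tensor is freed as soon as it is no longer referenced. Subsequent calls to run() reuse the prepared weights.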
void NEFullyConnectedLayer::prepare()
{
    if(!_is_prepared)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

        auto release_unused = [](Tensor * w)
        {
            if(!w->is_used())
            {
                w->allocator()->free();
            }
        };

        // Pointer to current weights
        const ITensor *cur_weights = _original_weights;

        // Reshape of the weights (happens only once)
        if(!_are_weights_reshaped)
        {
            // Run reshape weights kernel and mark weights as unused
            _reshape_weights_output.allocator()->allocate();
            _reshape_weights_function.run();

            cur_weights->mark_as_unused();
            cur_weights           = &_reshape_weights_output;
            _are_weights_reshaped = true;
        }

        // Convert weights if needed (happens only once)
        if(!_are_weights_converted)
        {
            _converted_weights_output.allocator()->allocate();
            _convert_weights.run();

            cur_weights->mark_as_unused();
            _are_weights_converted = true;
        }

        // Release reshaped weights if unused
        release_unused(&_reshape_weights_output);

        // Prepare GEMM and release unused weights
        if(!_is_quantized)
        {
            _mm_gemm.prepare();
        }

        // Release converted weights if unused
        release_unused(&_reshape_weights_output);
        release_unused(&_converted_weights_output);

        _is_prepared = true;
    }
}