/*
 * Copyright (c) 2017-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"

#include <algorithm>
#include <cmath>

namespace arm_compute
{
using namespace arm_compute::misc::shape_calculator;

namespace
{
// Get the min and max bounds of a quantized asymmetric output tensor, taking the fused activation into account
std::pair<PixelValue, PixelValue> get_quantized_asymmetric_output_min_max(const QuantizationInfo &q_info, const ActivationLayerInfo &act_info, DataType data_type)
{
    PixelValue type_min{};
    PixelValue type_max{};
    std::tie(type_min, type_max) = get_min_max(data_type);
    const UniformQuantizationInfo q_unif = q_info.uniform();

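    // A fused activation narrows the representable range: ReLU variants clamp the lower bound to the
    // quantized zero-point, while the bounded variants also quantize the activation limits a (and b).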
    if(act_info.enabled())
    {
        switch(act_info.activation())
        {
            case ActivationLayerInfo::ActivationFunction::RELU:
                type_min = PixelValue(q_unif.offset);
                break;
            case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
                type_min = PixelValue(q_unif.offset);
                type_max = PixelValue(act_info.a(), data_type, q_info);
                break;
            case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
                type_min = PixelValue(act_info.b(), data_type, q_info);
                type_max = PixelValue(act_info.a(), data_type, q_info);
                break;
            default:
                ARM_COMPUTE_ERROR("Activation function not supported.");
                break;
        }
    }

    return std::make_pair(type_min, type_max);
}

Status get_gemmlowp_output_stage_info(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const ActivationLayerInfo &act,
                                      GEMMLowpOutputStageInfo &gemmlowp_output_stage_info)
{
    const auto                    data_type = input->data_type();
    const QuantizationInfo        oq_info   = output->quantization_info();
    const UniformQuantizationInfo iq_unif   = input->quantization_info().uniform();
    const UniformQuantizationInfo wq_unif   = weights->quantization_info().uniform();
    const UniformQuantizationInfo oq_unif   = oq_info.uniform();

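    // The GEMMLowp accumulators are requantized to the output scale: decompose the real rescale factor
    // (input_scale * weights_scale) / output_scale into an integer multiplier and a shift.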
    float   multiplier = (iq_unif.scale * wq_unif.scale) / oq_unif.scale;
    int32_t output_multiplier;
    int32_t output_shift;

    ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));

    PixelValue type_min{};
    PixelValue type_max{};
    std::tie(type_min, type_max) = get_quantized_asymmetric_output_min_max(oq_info, act, data_type);

    gemmlowp_output_stage_info.gemmlowp_multiplier = output_multiplier;
    gemmlowp_output_stage_info.gemmlowp_shift      = output_shift;
    gemmlowp_output_stage_info.gemmlowp_offset     = oq_unif.offset;
    gemmlowp_output_stage_info.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    gemmlowp_output_stage_info.gemmlowp_min_bound  = type_min.get<int32_t>();
    gemmlowp_output_stage_info.gemmlowp_max_bound  = type_max.get<int32_t>();

    return Status{};
}

Status validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const ActivationLayerInfo &act)
{
    if(is_data_type_quantized_asymmetric(input->data_type()))
    {
        // Since we need negative offsets for computing the matrix multiplication, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info(input->quantization_info().uniform().scale, -input->quantization_info().uniform().offset);
        const QuantizationInfo weights_quantization_info(weights->quantization_info().uniform().scale, -weights->quantization_info().uniform().offset);

        GEMMLowpOutputStageInfo gemmlowp_output_stage_info;
        ARM_COMPUTE_RETURN_ON_ERROR(get_gemmlowp_output_stage_info(input, weights, output, act, gemmlowp_output_stage_info));

        GEMMInfo gemm_info;
        gemm_info.set_gemmlowp_output_stage(gemmlowp_output_stage_info);

        // Validate gemmlowp function
        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixMultiplyCore::validate(&input->clone()->set_quantization_info(input_quantization_info),
                                                                           &weights->clone()->set_quantization_info(weights_quantization_info),
                                                                           biases,
                                                                           output,
                                                                           gemm_info));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMM::validate(input, weights, biases, output, 1.f, 1.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */)));
    }

    return Status{};
}
} // namespace

void NEFullyConnectedLayerReshapeWeights::configure(const ITensor *input, ITensor *output)
{
    auto k = arm_compute::support::cpp14::make_unique<NETransposeKernel>();
    k->configure(input, output);
    _kernel = std::move(k);
}

Status NEFullyConnectedLayerReshapeWeights::validate(const ITensorInfo *input, const ITensorInfo *output)
{
    return NETransposeKernel::validate(input, output);
}

NEFullyConnectedLayer::NEFullyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
    : _memory_group(std::move(memory_manager)), _weights_manager(weights_manager), _flatten_kernel(), _convert_weights(), _convert_weights_managed(), _reshape_weights_function(),
      _reshape_weights_managed_function(), _mm_gemm(nullptr, weights_manager), _mm_gemmlowp(nullptr, weights_manager), _flatten_output(), _converted_weights_output(), _reshape_weights_output(),
      _original_weights(nullptr), _are_weights_converted(true), _are_weights_reshaped(false), _is_fc_after_conv(false), _is_quantized_asymmetric(false), _is_prepared(false)
{
}

void NEFullyConnectedLayer::configure_mm(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const ActivationLayerInfo &act)
{
    if(_is_quantized_asymmetric)
    {
        // Since we need negative offsets for computing the matrix multiplication, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.uniform().scale, -input_quantization_info.uniform().offset));
        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));

        // Configure gemmlowp function and output stage for asymmetric quantized types
        GEMMLowpOutputStageInfo gemmlowp_output_stage_info;
        const Status            status = get_gemmlowp_output_stage_info(input->info(), weights->info(), output->info(), act, gemmlowp_output_stage_info);
        ARM_COMPUTE_ERROR_ON(status.error_code() != ErrorCode::OK);

        GEMMInfo gemm_info;
        gemm_info.set_gemmlowp_output_stage(gemmlowp_output_stage_info);
        gemm_info.set_activation_info(act);
        _mm_gemmlowp.configure(input, weights, biases, output, gemm_info);

        // Restore the original QuantizationInfo as input and weights could be used in other fully connected layers
        input->info()->set_quantization_info(input_quantization_info);
        weights->info()->set_quantization_info(weights_quantization_info);
    }
    else
    {
        // Configure matrix multiply kernel
        GEMMInfo gemm_info(false, false, true /* Reshape weights only for the first run */);
        gemm_info.set_activation_info(act);
        _mm_gemm.configure(input, weights, biases, output, 1.f, 1.0f, gemm_info);
    }
}

void NEFullyConnectedLayer::configure_conv_fc(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const ActivationLayerInfo &act)
{
    ARM_COMPUTE_ERROR_ON((weights->info()->dimension(1) != (input->info()->dimension(0) * input->info()->dimension(1) * input->info()->dimension(2))));

    // If the fully connected layer is called after a convolution layer, the input tensor must be linearized

    // Initialize output tensor for flatten
    TensorShape shape_flatten = compute_flatten_shape(input->info());
    _flatten_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_flatten));

    // Configure flatten kernel
    _memory_group.manage(&_flatten_output);
    _flatten_kernel.configure(input, &_flatten_output);

    // Configure matrix multiply kernel
    configure_mm(&_flatten_output, weights, biases, output, act);

    // Allocate the output tensor for flatten once all the configure methods have been called
    _flatten_output.allocator()->allocate();
}

void NEFullyConnectedLayer::configure_fc_fc(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const ActivationLayerInfo &act)
{
    ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != weights->info()->dimension(1));

    // Configure matrix multiply kernel
    configure_mm(input, weights, biases, output, act);
}

void NEFullyConnectedLayer::configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output,
                                      FullyConnectedLayerInfo fc_info)
{
    // Perform validate step
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(NEFullyConnectedLayer::validate(input->info(),
                                                               weights->info(),
                                                               biases != nullptr ? biases->info() : nullptr,
                                                               output->info(),
                                                               fc_info));

    _are_weights_converted   = true;
    _are_weights_reshaped    = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    _is_fc_after_conv        = true;
    _is_quantized_asymmetric = is_data_type_quantized_asymmetric(input->info()->data_type());
    _original_weights        = weights;

    if(_weights_manager)
    {
        _weights_manager->manage(weights);
    }

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    const ITensor *weights_to_use = weights;

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->info()->dimension(1) > 1;
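    // With batches, the layer follows a convolution if the input's dimensions from the fourth onwards
    // match the output's dimensions from the second onwards; without batches, an input with more than
    // one dimension implies a convolutional producer.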
    if(is_batched_fc_layer)
    {
        _is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->info()->tensor_shape().cbegin() + 3,
                                                                                  input->info()->tensor_shape().cend(),
                                                                                  output->info()->tensor_shape().cbegin() + 1));
    }
    else
    {
        _is_fc_after_conv = input->info()->num_dimensions() > 1;
    }

    // Reshape weights if needed
    if(!_are_weights_reshaped)
    {
        if(_weights_manager && _weights_manager->are_weights_managed(weights))
        {
            _reshape_weights_managed_function.configure(weights);
            weights_to_use = _weights_manager->acquire(weights, &_reshape_weights_managed_function);
        }
        else
        {
            // Reshape the weights
            _reshape_weights_function.configure(weights, &_reshape_weights_output);
            weights_to_use = &_reshape_weights_output;
        }
    }

    // Convert weights if needed
    if(_is_fc_after_conv && (input->info()->data_layout() != fc_info.weights_trained_layout))
    {
        if(_weights_manager && _weights_manager->are_weights_managed(weights_to_use))
        {
            _convert_weights_managed.configure(weights_to_use,
                                               input->info()->tensor_shape(),
                                               fc_info.weights_trained_layout);
            weights_to_use = _weights_manager->acquire(weights, &_convert_weights_managed);
        }
        else
        {
            // Convert weights
            _convert_weights.configure(weights_to_use,
                                       &_converted_weights_output,
                                       input->info()->tensor_shape(),
                                       fc_info.weights_trained_layout);

            weights_to_use = &_converted_weights_output;
        }
        _are_weights_converted = false;
    }

    if(_is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        configure_conv_fc(input, weights_to_use, biases, output, fc_info.activation_info);
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        configure_fc_fc(input, weights_to_use, biases, output, fc_info.activation_info);
    }

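    // When internal weights are retained, mark them as reshaped so that prepare() does not process them again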
    _are_weights_reshaped = _are_weights_reshaped || fc_info.retain_internal_weights;
}

Status NEFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                       FullyConnectedLayerInfo fc_info)
{
    ARM_COMPUTE_UNUSED(fc_info.retain_internal_weights);
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);
    ARM_COMPUTE_RETURN_ERROR_ON(biases != nullptr && biases->num_dimensions() > 1);

    bool weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    bool is_fc_after_conv = true;

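    // Candidate TensorInfos for the intermediate tensors (flattened input, reshaped and converted weights)
    // used to validate the sub-functions without allocating anything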
    const ITensorInfo &flatten_input     = TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(input)));
    const ITensorInfo &reshaped_weights  = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights)));
    const ITensorInfo &converted_weights = weights_reshaped ? TensorInfo(weights->clone()->set_is_resizable(true).reset_padding()) : TensorInfo(*reshaped_weights.clone());

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    const ITensorInfo *input_to_use   = input;
    const ITensorInfo *weights_to_use = weights;

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->dimension(1) > 1;

    if(is_batched_fc_layer)
    {
        is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->tensor_shape().cbegin() + 3,
                                                                                 input->tensor_shape().cend(),
                                                                                 output->tensor_shape().cbegin() + 1));
    }
    else
    {
        is_fc_after_conv = input->num_dimensions() > 1;
    }

    if(!weights_reshaped)
    {
        // Validate reshape weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(NEFullyConnectedLayerReshapeWeights::validate(weights, &reshaped_weights));
        weights_to_use = &reshaped_weights;
    }

    if(is_fc_after_conv && (input->data_layout() != fc_info.weights_trained_layout))
    {
        // Validate convert weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(NEConvertFullyConnectedWeights::validate(weights_to_use,
                                                                             &converted_weights,
                                                                             input->tensor_shape(),
                                                                             fc_info.weights_trained_layout));
        weights_to_use = &converted_weights;
    }

    if(is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(1) != (input->dimension(0) * input->dimension(1) * input->dimension(2))));

        // Validate flatten kernel
        ARM_COMPUTE_RETURN_ON_ERROR(NEFlattenLayerKernel::validate(input, &flatten_input));
        input_to_use = &flatten_input;
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != weights_to_use->dimension(1));
    }
    // Validate matrix multiply kernel
    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(input_to_use, weights_to_use, biases, output, fc_info.activation_info));

    return Status{};
}

void NEFullyConnectedLayer::run()
{
    prepare();

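    // Acquire the managed memory for the duration of this run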
    MemoryGroupResourceScope scope_mg(_memory_group);

    // Linearize input if it comes from a convolutional layer
    if(_is_fc_after_conv)
    {
        NEScheduler::get().schedule(&_flatten_kernel, Window::DimY);
    }

    // Run matrix multiply
    if(_is_quantized_asymmetric)
    {
        _mm_gemmlowp.run();
    }
    else
    {
        _mm_gemm.run();
    }
}

void NEFullyConnectedLayer::prepare()
{
    if(!_is_prepared)
    {
        if(!_weights_manager)
        {
            ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());
        }

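        // Helper that frees the backing memory of an intermediate weights tensor once it is no longer used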
        auto release_unused = [](Tensor * w)
        {
            if(!w->is_used())
            {
                w->allocator()->free();
            }
        };

        // Pointer to current weights
        const ITensor *cur_weights = _original_weights;

        // Reshape of the weights (happens only once)
        if(!_are_weights_reshaped)
        {
            if(_weights_manager && _weights_manager->are_weights_managed(_original_weights))
            {
                cur_weights = _weights_manager->run(cur_weights, &_reshape_weights_managed_function);
            }
            else
            {
                // Reshape of the weights (happens only once)
                if(!_are_weights_reshaped)
                {
                    // Run reshape weights kernel and mark weights as unused
                    _reshape_weights_output.allocator()->allocate();
                    _reshape_weights_function.run();
                }
                cur_weights->mark_as_unused();
                cur_weights = &_reshape_weights_output;
            }
            _are_weights_reshaped = true;
        }

        // Convert weights if needed (happens only once)
        if(!_are_weights_converted)
        {
            if(_weights_manager && _weights_manager->are_weights_managed(cur_weights))
            {
                _weights_manager->run(cur_weights, &_convert_weights_managed);
            }
            else
            {
                _converted_weights_output.allocator()->allocate();
                _convert_weights.run();
                cur_weights->mark_as_unused();
            }

            _are_weights_converted = true;
        }

        // Release reshaped weights if unused
        release_unused(&_reshape_weights_output);

        // Prepare GEMM and release unused weights
        if(!_is_quantized_asymmetric)
        {
            _mm_gemm.prepare();
        }

        // Release converted weights if unused
        release_unused(&_reshape_weights_output);
        release_unused(&_converted_weights_output);

        _is_prepared = true;
    }
}
} // namespace arm_compute