/*
 * Copyright (c) 2021-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/cpu/operators/CpuFullyConnected.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/common/utils/Log.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/MemoryHelpers.h"
#include "src/cpu/kernels/CpuTransposeKernel.h"
#include "src/cpu/operators/CpuConvertFullyConnectedWeights.h"
#include "src/cpu/operators/CpuFlatten.h"
#include "src/cpu/operators/CpuGemm.h"
#include "src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.h"
#include "src/cpu/utils/CpuAuxTensorHandler.h"

namespace arm_compute
{
namespace cpu
{
using namespace arm_compute::experimental;
using namespace arm_compute::misc::shape_calculator;

namespace
{
// Get the min/max bounds of a quantized asymmetric dst tensor, accounting for the fused activation, if any
std::pair<PixelValue, PixelValue> get_quantized_asymmetric_output_min_max(const QuantizationInfo &q_info, const ActivationLayerInfo &act_info, DataType data_type)
{
    PixelValue type_min{};
    PixelValue type_max{};
    std::tie(type_min, type_max)         = get_min_max(data_type);
    const UniformQuantizationInfo q_unif = q_info.uniform();

    if(act_info.enabled())
    {
        switch(act_info.activation())
        {
            case ActivationLayerInfo::ActivationFunction::RELU:
                type_min = PixelValue(q_unif.offset);
                break;
            case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
                type_min = PixelValue(q_unif.offset);
                type_max = PixelValue(act_info.a(), data_type, q_info);
                break;
            case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
                type_min = PixelValue(act_info.b(), data_type, q_info);
                type_max = PixelValue(act_info.a(), data_type, q_info);
                break;
            default:
                ARM_COMPUTE_ERROR("Activation function not supported.");
                break;
        }
    }

    return std::make_pair(type_min, type_max);
}
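// Illustrative example (not from the library docs): for a QASYMM8 dst with
// scale = 0.05f and offset = 10, a fused BOUNDED_RELU with a() = 6.0f clamps the
// result to [offset, quantize(6.0f)] = [10, 6.0f / 0.05f + 10] = [10, 130].
// Without a fused activation the bounds stay at the full range of the data type,
// i.e. [0, 255] for QASYMM8.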

Status get_gemmlowp_output_stage_info(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *dst, const ActivationLayerInfo &act,
                                      GEMMLowpOutputStageInfo &gemmlowp_output_stage_info)
{
    const auto                    data_type = src->data_type();
    const QuantizationInfo        oq_info   = dst->quantization_info();
    const UniformQuantizationInfo iq_unif   = src->quantization_info().uniform();
    const UniformQuantizationInfo wq_unif   = weights->quantization_info().uniform();
    const UniformQuantizationInfo oq_unif   = oq_info.uniform();

    float   multiplier = (iq_unif.scale * wq_unif.scale) / oq_unif.scale;
    int32_t output_multiplier;
    int32_t output_shift;

    ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));

    PixelValue type_min{};
    PixelValue type_max{};
    std::tie(type_min, type_max) = get_quantized_asymmetric_output_min_max(oq_info, act, data_type);

    gemmlowp_output_stage_info.gemmlowp_multiplier = output_multiplier;
    gemmlowp_output_stage_info.gemmlowp_shift      = output_shift;
    gemmlowp_output_stage_info.gemmlowp_offset     = oq_unif.offset;
    gemmlowp_output_stage_info.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    gemmlowp_output_stage_info.gemmlowp_min_bound  = type_min.get<int32_t>();
    gemmlowp_output_stage_info.gemmlowp_max_bound  = type_max.get<int32_t>();

    return Status{};
}
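// A sketch of the requantization math above, using made-up numbers: with a src
// scale of 0.5f, a weights scale of 0.25f and a dst scale of 1.0f / 6.0f, the
// real-valued multiplier is (0.5f * 0.25f) * 6.0f = 0.75f.
// calculate_quantized_multiplier() then expresses it as a Q0.31 fixed-point value
// plus a power-of-two shift, here roughly 0.75f ~= 1610612736 / 2^31 with a shift
// of 0, so the output stage can requantize the int32 accumulators using
// integer-only arithmetic.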

Status validate_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const ActivationLayerInfo &act, bool enable_fast_math, WeightFormat weight_format)
{
    if(is_data_type_quantized_asymmetric(src->data_type()))
    {
        // The quantized GEMM path needs negated offsets, so extract and negate
        // the src and weights offsets into new QuantizationInfo objects
        const QuantizationInfo src_quantization_info(src->quantization_info().uniform().scale, -src->quantization_info().uniform().offset);
        const QuantizationInfo weights_quantization_info(weights->quantization_info().uniform().scale, -weights->quantization_info().uniform().offset);

        GEMMLowpOutputStageInfo gemmlowp_output_stage_info;
        ARM_COMPUTE_RETURN_ON_ERROR(get_gemmlowp_output_stage_info(src, weights, dst, act, gemmlowp_output_stage_info));

        GEMMInfo gemm_info;
        gemm_info.set_gemmlowp_output_stage(gemmlowp_output_stage_info);
        gemm_info.set_fast_math(enable_fast_math);

        // Validate gemmlowp function
        TensorInfo src_info     = src->clone()->set_quantization_info(src_quantization_info);
        TensorInfo weights_info = weights->clone()->set_quantization_info(weights_quantization_info);
        ARM_COMPUTE_RETURN_ON_ERROR(CpuGemmLowpMatrixMultiplyCore::validate(&src_info,
                                                                            &weights_info,
                                                                            biases,
                                                                            dst,
                                                                            gemm_info));
    }
    else
    {
        GEMMInfo gemm_info(false, false, true /* Reshape weights only for the first run */);
        gemm_info.set_weight_format(weight_format);
        gemm_info.set_fixed_format(weight_format != WeightFormat::UNSPECIFIED);
        gemm_info.set_fast_math(enable_fast_math);
        ARM_COMPUTE_RETURN_ON_ERROR(CpuGemm::validate(src, weights, biases, dst, 1.0f, 1.0f, gemm_info));
    }

    return Status{};
}
} // namespace

CpuFullyConnected::CpuFullyConnected()
    : _flatten(nullptr),
      _convert_weights(nullptr),
      _transpose_weights(nullptr),
      _mm_gemm(nullptr),
      _mm_gemmlowp(nullptr),
      _flattened_src(),
      _converted_weights(),
      _reshaped_weights(),
      _trans_weights(),
      _trans_weights_idx(AuxTensorIdx::Count),
      _aux_mem(Count),
      _needs_weights_conversion(false),
      _needs_weights_reshape(false),
      _is_fc_after_conv(false),
      _is_quantized_asymmetric(false),
      _is_prepared(false),
      _enable_fast_math(false),
      _fixed_format(false),
      _weight_format(arm_compute::WeightFormat::UNSPECIFIED),
      _dynamic_weights(false)
{
}

CpuFullyConnected::~CpuFullyConnected() = default;

void CpuFullyConnected::configure_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ActivationLayerInfo &act)
{
    if(_is_quantized_asymmetric)
    {
        // The quantized GEMM path needs negated offsets, so extract and negate
        // the src and weights offsets into new QuantizationInfo objects
        const QuantizationInfo src_quantization_info(src->quantization_info().uniform().scale, -src->quantization_info().uniform().offset);
        const QuantizationInfo weights_quantization_info(weights->quantization_info().uniform().scale, -weights->quantization_info().uniform().offset);

        TensorInfo src_info     = src->clone()->set_quantization_info(src_quantization_info);
        TensorInfo weights_info = weights->clone()->set_quantization_info(weights_quantization_info);

        // Configure gemmlowp function and output stage for asymmetric quantized types
        GEMMLowpOutputStageInfo gemmlowp_output_stage_info;
        const Status            status = get_gemmlowp_output_stage_info(&src_info, &weights_info, dst, act, gemmlowp_output_stage_info);
        ARM_COMPUTE_ERROR_ON(status.error_code() != ErrorCode::OK);

        GEMMInfo gemm_info(false, false, !_dynamic_weights /* Reshape weights only for the first run */);
        gemm_info.set_gemmlowp_output_stage(gemmlowp_output_stage_info);
        gemm_info.set_activation_info(act);
        gemm_info.set_fast_math(_enable_fast_math);
        _mm_gemmlowp = std::make_unique<CpuGemmLowpMatrixMultiplyCore>();
        _mm_gemmlowp->configure(&src_info, &weights_info, biases, dst, gemm_info);
    }
    else
    {
        // Configure matrix multiply kernel
        GEMMInfo gemm_info(false, false, !_dynamic_weights /* Reshape weights only for the first run */);
        gemm_info.set_activation_info(act);
        gemm_info.set_fast_math(_enable_fast_math);
        gemm_info.set_fixed_format(_fixed_format);
        gemm_info.set_weight_format(_weight_format);
        _mm_gemm = std::make_unique<CpuGemm>();
        _mm_gemm->configure(src, weights, biases, dst, 1.0f, 1.0f, gemm_info);
    }
}
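// Illustrative note on the offset negation above: asymmetric quantization maps a
// real value x to q = x / scale + offset, i.e. x = scale * (q - offset). The cloned
// TensorInfo objects therefore carry -offset (e.g. an offset of 10 is stored as -10),
// matching the sign convention the lowp matrix multiply core expects.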

void CpuFullyConnected::configure_conv_fc(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ActivationLayerInfo &act)
{
    ARM_COMPUTE_ERROR_ON((weights->dimension(1) != (src->dimension(0) * src->dimension(1) * src->dimension(2))));

    // If the fully connected layer is called after a convolution layer, the src tensor must be linearized

    // Initialize output tensor for flatten
    auto_init_if_empty(_flattened_src, src->clone()->set_tensor_shape(compute_flatten_shape(src)));

    _flatten = std::make_unique<CpuFlatten>();
    _flatten->configure(src, &_flattened_src);

    // Configure matrix multiply kernel
    configure_mm(&_flattened_src, weights, biases, dst, act);
}
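// Example of the linearization above (illustrative shapes): a convolution output of
// shape [W, H, C, B] = [7, 7, 64, 8] is flattened by compute_flatten_shape() into
// [7 * 7 * 64, 8] = [3136, 8], one row vector per batch; dimension 1 of the weights
// must then equal 3136, which is what the assert at the top of this function checks.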

void CpuFullyConnected::configure_fc_fc(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ActivationLayerInfo &act)
{
    ARM_COMPUTE_ERROR_ON(src->dimension(0) != weights->dimension(1));

    // Configure matrix multiply kernel
    configure_mm(src, weights, biases, dst, act);
}

void CpuFullyConnected::configure(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst,
                                  FullyConnectedLayerInfo fc_info, const WeightsInfo &weights_info)
{
    // Perform validate step
    ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);
    ARM_COMPUTE_ERROR_THROW_ON(CpuFullyConnected::validate(src,
                                                           weights,
                                                           biases,
                                                           dst,
                                                           fc_info,
                                                           weights_info));
    ARM_COMPUTE_LOG_PARAMS(src, weights, biases, dst, fc_info);

    _needs_weights_conversion = false;
    _needs_weights_reshape    = fc_info.transpose_weights ? !fc_info.are_weights_reshaped : false;
    _needs_weights_reshape    = _needs_weights_reshape && !fc_info.retain_internal_weights;
    _is_fc_after_conv         = true;
    _is_quantized_asymmetric  = is_data_type_quantized_asymmetric(src->data_type());
    _is_prepared              = false;
    _trans_weights_idx        = AuxTensorIdx::Count;
    _enable_fast_math         = fc_info.enable_fast_math;
    _fixed_format             = weights_info.weight_format() != WeightFormat::UNSPECIFIED;
    _weight_format            = weights_info.weight_format();
    _dynamic_weights          = !weights->are_values_constant() && _needs_weights_reshape;

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    const ITensorInfo *weights_to_use = weights;

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = dst->dimension(1) > 1;
    if(is_batched_fc_layer)
    {
        _is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(src->tensor_shape().cbegin() + 3, src->tensor_shape().cend(), dst->tensor_shape().cbegin() + 1));
    }
    else
    {
        _is_fc_after_conv = src->num_dimensions() > 1;
    }

    // Reshape weights if needed
    if(_needs_weights_reshape)
    {
        // Reshape the weights
        _transpose_weights = std::make_unique<kernels::CpuTransposeKernel>();
        _transpose_weights->configure(weights, &_reshaped_weights);
        weights_to_use     = &_reshaped_weights;
        _trans_weights_idx = AuxTensorIdx::TransposedWeights;
    }

    // Convert weights if needed
    if(_is_fc_after_conv && (src->data_layout() != fc_info.weights_trained_layout))
    {
        // Convert weights
        _convert_weights = std::make_unique<CpuConvertFullyConnectedWeights>();
        _convert_weights->configure(weights_to_use,
                                    &_converted_weights,
                                    src->tensor_shape(),
                                    fc_info.weights_trained_layout);

        weights_to_use            = &_converted_weights;
        _needs_weights_conversion = true;
        _trans_weights_idx        = AuxTensorIdx::ConvertedWeights;
    }

    if(_is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution layer (cases 1 and 3): linearize the src first
        configure_conv_fc(src, weights_to_use, biases, dst, fc_info.activation_info);
    }
    else
    {
        // Fully Connected layer after another Fully Connected layer (cases 2 and 4)
        configure_fc_fc(src, weights_to_use, biases, dst, fc_info.activation_info);
    }

    // Retain the tensorinfo with the weights to use
    if(_needs_weights_reshape || _needs_weights_conversion)
    {
        _trans_weights = *weights_to_use;
    }

    // Set auxiliary memory requirements
    auto gemm_mem_req = (_is_quantized_asymmetric) ? _mm_gemmlowp->workspace() : _mm_gemm->workspace();
    for(unsigned int i = 0; i < gemm_mem_req.size(); ++i)
    {
        _aux_mem[i] = gemm_mem_req[i];
    }

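    // A note on the lifetimes chosen below (our reading of the memory manager contract):
    // Temporary buffers only live for the duration of a single run(), Prepare buffers can
    // be released once prepare() has finished, and Persistent buffers must stay valid
    // across runs. Dynamic weights force Temporary, since every run re-transforms them.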
    if(_aux_mem[Pretranspose].size > 0)
    {
        // Release permuted weights at the end of prepare as they are further transposed by the assembly dispatch
        // Do not release them if biases are dynamic and data type is quantized, since the weights tensor will be used for biases offset calculation
        // Keep all the auxiliary tensors in case of dynamic weights as they are recalculated every time.
        _aux_mem[TransposedWeights] = MemoryInfo(
                                          offset_int_vec(TransposedWeights),
                                          _dynamic_weights                                                ? MemoryLifetime::Temporary :
                                          (_is_quantized_asymmetric && biases && !(biases->are_values_constant())) ? MemoryLifetime::Persistent :
                                          MemoryLifetime::Prepare,
                                          _reshaped_weights.total_size());

        _aux_mem[ConvertedWeights] = MemoryInfo(
                                         offset_int_vec(ConvertedWeights),
                                         _dynamic_weights ? MemoryLifetime::Temporary : MemoryLifetime::Prepare,
                                         _converted_weights.total_size());
    }
    else
    {
        _aux_mem[TransposedWeights] = MemoryInfo(
                                          offset_int_vec(TransposedWeights),
                                          _dynamic_weights          ? MemoryLifetime::Temporary :
                                          _needs_weights_conversion ? MemoryLifetime::Prepare :
                                          MemoryLifetime::Persistent,
                                          _reshaped_weights.total_size());

        _aux_mem[ConvertedWeights] = MemoryInfo(
                                         offset_int_vec(ConvertedWeights),
                                         _dynamic_weights ? MemoryLifetime::Temporary : MemoryLifetime::Persistent,
                                         _converted_weights.total_size());
    }
    _aux_mem[FlattenedSrc] = MemoryInfo(offset_int_vec(FlattenedSrc), MemoryLifetime::Temporary, _flattened_src.total_size());
}

Status CpuFullyConnected::has_opt_impl(arm_compute::WeightFormat &expected_weight_format, const ITensorInfo *src, const ITensorInfo *weights,
                                       const ITensorInfo *biases, const ITensorInfo *dst, FullyConnectedLayerInfo fc_info, WeightsInfo weights_info)
{
    GEMMInfo gemm_info(false, false, true /* Reshape weights only for the first run */);
    gemm_info.set_activation_info(fc_info.activation_info);
    gemm_info.set_fast_math(fc_info.enable_fast_math);
    gemm_info.set_fixed_format(weights_info.weight_format() != WeightFormat::UNSPECIFIED);
    gemm_info.set_weight_format(weights_info.weight_format());

    return CpuGemm::has_opt_impl(expected_weight_format, src, weights, biases, dst, gemm_info);
}
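// Typical use of has_opt_impl(), sketched from how fixed-format kernels are queried
// elsewhere in the library: pass a WeightsInfo whose weight_format() is
// WeightFormat::ANY and, on success, read the layout the assembly kernels expect back
// from expected_weight_format; the weights can then be pre-arranged in that layout
// before configure() is called.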

Status CpuFullyConnected::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
                                   FullyConnectedLayerInfo fc_info, const WeightsInfo &weights_info)
{
    ARM_COMPUTE_UNUSED(fc_info.retain_internal_weights);
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, dst);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);

    if(is_fixed_format_fast_math(weights_info.weight_format()))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(src, DataType::F32);
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(weights, DataType::BFLOAT16);
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(dst, DataType::F32);
    }
    else
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, weights, dst);
    }

    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);
    ARM_COMPUTE_RETURN_ERROR_ON(fc_info.activation_info.enabled() && is_data_type_quantized(src->data_type()) && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::RELU
                                && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::BOUNDED_RELU && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU);
Michele Di Giorgiod9cdf142021-07-02 15:17:08 +0100397
398 bool weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
399 bool is_fc_after_conv = true;
400
401 const ITensorInfo &flatten_src = TensorInfo(src->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(src)));
402 const ITensorInfo &reshaped_weights = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights)));
403 const ITensorInfo &converted_weights = weights_reshaped ? TensorInfo(weights->clone()->set_is_resizable(true).reset_padding()) : TensorInfo(*reshaped_weights.clone());
404
405 // With the Fully Connected layer we can have 4 different cases:
406 // 1) Convolution layer -> Fully Connected layer without batches
407 // 2) Fully Connected layer -> Fully Connected layer without batches
408 // 3) Convolution layer -> Fully Connected layer with batches
409 // 4) Fully Connected layer -> Fully Connected layer with batches
410
411 const ITensorInfo *src_to_use = src;
412 const ITensorInfo *weights_to_use = weights;
413
414 // Check if we have a fully connected layer with batches
415 const bool is_batched_fc_layer = dst->dimension(1) > 1;
416
Giorgio Arena63e0beb2021-09-24 14:04:27 +0100417 if(biases != nullptr)
418 {
419 ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
420 if(is_data_type_quantized(src->data_type()))
421 {
422 ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
423 }
424 else
425 {
426 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, biases);
427 }
428 }
429
Michele Di Giorgiod9cdf142021-07-02 15:17:08 +0100430 if(is_batched_fc_layer)
431 {
Milos Puzovic13b623e2022-07-27 17:53:21 +0000432 is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(src->tensor_shape().cbegin() + 3, src->tensor_shape().cend(), dst->tensor_shape().cbegin() + 1));
Michele Di Giorgiod9cdf142021-07-02 15:17:08 +0100433 }
434 else
435 {
436 is_fc_after_conv = src->num_dimensions() > 1;
437 }
438
439 if(!weights_reshaped)
440 {
441 // Validate reshape weights kernel
442 ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuTransposeKernel::validate(weights, &reshaped_weights));
443 weights_to_use = &reshaped_weights;
444 }
445
446 if(is_fc_after_conv && (src->data_layout() != fc_info.weights_trained_layout))
447 {
448 // Validate convert weights kernel
449 ARM_COMPUTE_RETURN_ON_ERROR(CpuConvertFullyConnectedWeights::validate(weights_to_use,
450 &converted_weights,
451 src->tensor_shape(),
452 fc_info.weights_trained_layout));
453 weights_to_use = &converted_weights;
454 }
455
456 if(is_fc_after_conv)
457 {
458 // Fully Connected layer after a Convolution Layer without batches
459 ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(1) != (src->dimension(0) * src->dimension(1) * src->dimension(2))));
460
461 // Validate flatten kernel
462 ARM_COMPUTE_RETURN_ON_ERROR(CpuFlatten::validate(src, &flatten_src));
463 src_to_use = &flatten_src;
464 }
465 else
466 {
467 // Fully Connected layer after a Fully Connected Layer without batches
468 ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) != weights_to_use->dimension(1));
469 }
470 // Validate matrix multiply kernel
Jonathan Deakin464ed202023-01-12 11:41:14 +0000471 ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(src_to_use, weights_to_use, biases, dst, fc_info.activation_info, fc_info.enable_fast_math, weights_info.weight_format()));
Michele Di Giorgiod9cdf142021-07-02 15:17:08 +0100472
473 return Status{};
474}

void CpuFullyConnected::run(ITensorPack &tensors)
{
    prepare(tensors);

#ifdef ARM_COMPUTE_ASSERTS_ENABLED
    ++_asrt_run_count;
    ARM_COMPUTE_ERROR_ON(_dynamic_weights && _asrt_prepare_count != _asrt_run_count);
#endif // ARM_COMPUTE_ASSERTS_ENABLED

    auto src = tensors.get_const_tensor(ACL_SRC_0);

    CpuAuxTensorHandler flattened_src(offset_int_vec(FlattenedSrc), _flattened_src, tensors, false);
    CpuAuxTensorHandler transformed_wei(offset_int_vec(_trans_weights_idx), _trans_weights, tensors, false);

    // Linearize src if it comes from a convolutional layer
    if(_is_fc_after_conv)
    {
        ITensorPack flatten_pack{ { ACL_SRC, src }, { ACL_DST, flattened_src.get() } };
        _flatten->run(flatten_pack);
    }

    ITensorPack gemm_pack = tensors;
    gemm_pack.add_const_tensor(ACL_SRC_0, (_is_fc_after_conv) ? flattened_src.get() : src);
    if(_needs_weights_reshape || _needs_weights_conversion)
    {
        gemm_pack.add_const_tensor(ACL_SRC_1, transformed_wei.get());
    }

    // Run matrix multiply
    if(_is_quantized_asymmetric)
    {
        _mm_gemmlowp->run(gemm_pack);
    }
    else
    {
        _mm_gemm->run(gemm_pack);
    }
}
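// A minimal sketch of how a caller drives this operator (assuming ITensor objects
// src, weights, bias and dst have already been created and allocated elsewhere):
//
//   CpuFullyConnected fc;
//   fc.configure(src.info(), weights.info(), bias.info(), dst.info(), FullyConnectedLayerInfo());
//   ITensorPack pack{ { ACL_SRC_0, &src }, { ACL_SRC_1, &weights }, { ACL_SRC_2, &bias }, { ACL_DST, &dst } };
//   fc.prepare(pack); // optional: run() calls it on first use
//   fc.run(pack);
//
// Auxiliary tensors reported by workspace() must also be bound into the pack under
// their offset_int_vec() identifiers; see CpuAuxTensorHandler above for how they
// are looked up at run time.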

void CpuFullyConnected::prepare(ITensorPack &tensors)
{
    if(!_is_prepared || _dynamic_weights)
    {
#ifdef ARM_COMPUTE_ASSERTS_ENABLED
        ++_asrt_prepare_count;
        ARM_COMPUTE_ERROR_ON(!_dynamic_weights && _asrt_prepare_count > 1);
#endif // ARM_COMPUTE_ASSERTS_ENABLED

        auto weights = tensors.get_const_tensor(ACL_SRC_1);

        CpuAuxTensorHandler reshaped_weights(offset_int_vec(TransposedWeights), _reshaped_weights, tensors, false);
        CpuAuxTensorHandler converted_weights(offset_int_vec(ConvertedWeights), _converted_weights, tensors, false);

        // Pointer to current weights
        const ITensor *cur_weights = weights;

        // Reshape the weights (happens only once, unless the weights are dynamic)
        if(_needs_weights_reshape)
        {
            // Run reshape weights kernel and mark weights as unused
            ITensorPack transpose_pack{ { ACL_SRC, weights }, { ACL_DST, reshaped_weights.get() } };
            NEScheduler::get().schedule_op(_transpose_weights.get(), Window::DimY, _transpose_weights->window(), transpose_pack);

            cur_weights->mark_as_unused();
            cur_weights = reshaped_weights.get();
        }

        // Convert the weights if needed (happens only once, unless the weights are dynamic)
        if(_needs_weights_conversion)
        {
            ITensorPack convert_pack{ { ACL_SRC, cur_weights }, { ACL_DST, converted_weights.get() } };
            _convert_weights->run(convert_pack);

            cur_weights->mark_as_unused();
            cur_weights = converted_weights.get();
        }

        ITensorPack gemm_pack = tensors;
        gemm_pack.add_const_tensor(ACL_SRC_1, cur_weights);

        // Prepare GEMM and release unused weights
        if(!_is_quantized_asymmetric)
        {
            _mm_gemm->prepare(gemm_pack);
        }
        else
        {
            _mm_gemmlowp->prepare(gemm_pack);
        }

        _is_prepared = true;
    }
}
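// Note on the counters above (our reading of the assert-only bookkeeping): with static
// weights, prepare() transforms them a single time and later calls are no-ops; with
// dynamic weights it must run before every run(), and the _asrt_* counters verify that
// pairing in debug builds only, since they compile out without ARM_COMPUTE_ASSERTS_ENABLED.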

experimental::MemoryRequirements CpuFullyConnected::workspace() const
{
    return _aux_mem;
}
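// A sketch of how workspace() output is typically consumed (field names per the
// experimental::MemoryInfo struct, as we read it): for each MemoryInfo in the returned
// MemoryRequirements, the caller allocates a tensor of info.size bytes and binds it
// into the run/prepare pack under info.slot, e.g. pack.add_tensor(info.slot, &aux);
// the slot values are the offset_int_vec(...) identifiers registered in configure().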
574} // namespace cpu
575} // namespace arm_compute