/*
 * Copyright (c) 2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/cpu/operators/CpuFullyConnected.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/MemoryHelpers.h"
#include "src/cpu/kernels/CpuTransposeKernel.h"
#include "src/cpu/operators/CpuConvertFullyConnectedWeights.h"
#include "src/cpu/operators/CpuFlatten.h"
#include "src/cpu/operators/CpuGemm.h"
#include "src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.h"
#include "src/cpu/utils/CpuAuxTensorHandler.h"

namespace arm_compute
{
namespace cpu
{
using namespace arm_compute::experimental;
using namespace arm_compute::misc::shape_calculator;

namespace
{
// Get min, max bound of a quantized asymmetric dst tensor, with the effect of fused activation
std::pair<PixelValue, PixelValue> get_quantized_asymmetric_output_min_max(const QuantizationInfo &q_info, const ActivationLayerInfo &act_info, DataType data_type)
{
    PixelValue type_min{};
    PixelValue type_max{};
    std::tie(type_min, type_max) = get_min_max(data_type);
    const UniformQuantizationInfo q_unif = q_info.uniform();

    if(act_info.enabled())
    {
        switch(act_info.activation())
        {
            case ActivationLayerInfo::ActivationFunction::RELU:
                type_min = PixelValue(q_unif.offset);
                break;
            case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
                type_min = PixelValue(q_unif.offset);
                type_max = PixelValue(act_info.a(), data_type, q_info);
                break;
            case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
                type_min = PixelValue(act_info.b(), data_type, q_info);
                type_max = PixelValue(act_info.a(), data_type, q_info);
                break;
            default:
                ARM_COMPUTE_ERROR("Activation function not supported.");
                break;
        }
    }

    return std::make_pair(type_min, type_max);
}

Status get_gemmlowp_output_stage_info(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *dst, const ActivationLayerInfo &act,
                                      GEMMLowpOutputStageInfo &gemmlowp_output_stage_info)
{
    const auto                    data_type = src->data_type();
    const QuantizationInfo        oq_info   = dst->quantization_info();
    const UniformQuantizationInfo iq_unif   = src->quantization_info().uniform();
    const UniformQuantizationInfo wq_unif   = weights->quantization_info().uniform();
    const UniformQuantizationInfo oq_unif   = oq_info.uniform();

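    // The int32 GEMM accumulator is requantized to the dst domain with a fixed-point multiplier and
    // shift derived from the src, weights and dst scales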
    float   multiplier = (iq_unif.scale * wq_unif.scale) / oq_unif.scale;
    int32_t output_multiplier;
    int32_t output_shift;

    ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));

    PixelValue type_min{};
    PixelValue type_max{};
    std::tie(type_min, type_max) = get_quantized_asymmetric_output_min_max(oq_info, act, data_type);

    gemmlowp_output_stage_info.gemmlowp_multiplier = output_multiplier;
    gemmlowp_output_stage_info.gemmlowp_shift      = output_shift;
    gemmlowp_output_stage_info.gemmlowp_offset     = oq_unif.offset;
    gemmlowp_output_stage_info.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    gemmlowp_output_stage_info.gemmlowp_min_bound  = type_min.get<int32_t>();
    gemmlowp_output_stage_info.gemmlowp_max_bound  = type_max.get<int32_t>();

    return Status{};
}

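// Validate the matrix-multiply stage: CpuGemmLowpMatrixMultiplyCore for asymmetric quantized types,
// CpuGemm otherwise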
Status validate_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const ActivationLayerInfo &act)
{
    if(is_data_type_quantized_asymmetric(src->data_type()))
    {
        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate src and weights offset
        const QuantizationInfo src_quantization_info(src->quantization_info().uniform().scale, -src->quantization_info().uniform().offset);
        const QuantizationInfo weights_quantization_info(weights->quantization_info().uniform().scale, -weights->quantization_info().uniform().offset);

        GEMMLowpOutputStageInfo gemmlowp_output_stage_info;
        ARM_COMPUTE_RETURN_ON_ERROR(get_gemmlowp_output_stage_info(src, weights, dst, act, gemmlowp_output_stage_info));

        GEMMInfo gemm_info;
        gemm_info.set_gemmlowp_output_stage(gemmlowp_output_stage_info);

        // Validate gemmlowp function
        TensorInfo src_info     = src->clone()->set_quantization_info(src_quantization_info);
        TensorInfo weights_info = weights->clone()->set_quantization_info(weights_quantization_info);
        ARM_COMPUTE_RETURN_ON_ERROR(CpuGemmLowpMatrixMultiplyCore::validate(&src_info,
                                                                            &weights_info,
                                                                            biases,
                                                                            dst,
                                                                            gemm_info));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CpuGemm::validate(src, weights, biases, dst, 1.f, 1.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */)));
    }

    return Status{};
}
} // namespace

CpuFullyConnected::CpuFullyConnected()
    : _flatten(nullptr),
      _convert_weights(nullptr),
      _transpose_weights(nullptr),
      _mm_gemm(nullptr),
      _mm_gemmlowp(nullptr),
      _flattened_src(),
      _converted_weights(),
      _reshaped_weights(),
      _trans_weights(),
      _trans_weights_idx(AuxTensorIdx::Count),
      _aux_mem(Count),
      _needs_weights_conversion(false),
      _needs_weights_reshape(false),
      _is_fc_after_conv(false),
      _is_quantized_asymmetric(false),
      _is_prepared(false)
{
}

CpuFullyConnected::~CpuFullyConnected() = default;

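// Configure the matrix-multiply stage; the fused activation is passed to the GEMM through GEMMInfo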
void CpuFullyConnected::configure_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ActivationLayerInfo &act)
{
    if(_is_quantized_asymmetric)
    {
        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate src and weights offset
        const QuantizationInfo src_quantization_info(src->quantization_info().uniform().scale, -src->quantization_info().uniform().offset);
        const QuantizationInfo weights_quantization_info(weights->quantization_info().uniform().scale, -weights->quantization_info().uniform().offset);

        TensorInfo src_info     = src->clone()->set_quantization_info(src_quantization_info);
        TensorInfo weights_info = weights->clone()->set_quantization_info(weights_quantization_info);

        // Configure gemmlowp function and output stage for asymmetric quantized types
        GEMMLowpOutputStageInfo gemmlowp_output_stage_info;
        const Status            status = get_gemmlowp_output_stage_info(&src_info, &weights_info, dst, act, gemmlowp_output_stage_info);
        ARM_COMPUTE_ERROR_ON(status.error_code() != ErrorCode::OK);

        GEMMInfo gemm_info;
        gemm_info.set_gemmlowp_output_stage(gemmlowp_output_stage_info);
        gemm_info.set_activation_info(act);
        _mm_gemmlowp = std::make_unique<CpuGemmLowpMatrixMultiplyCore>();
        _mm_gemmlowp->configure(&src_info, &weights_info, biases, dst, gemm_info);
    }
    else
    {
        // Configure matrix multiply kernel
        GEMMInfo gemm_info(false, false, true /* Reshape weights only for the first run */);
        gemm_info.set_activation_info(act);
        _mm_gemm = std::make_unique<CpuGemm>();
        _mm_gemm->configure(src, weights, biases, dst, 1.f, 1.0f, gemm_info);
    }
}

void CpuFullyConnected::configure_conv_fc(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ActivationLayerInfo &act)
{
    ARM_COMPUTE_ERROR_ON((weights->dimension(1) != (src->dimension(0) * src->dimension(1) * src->dimension(2))));

    // If the fully connected layer is called after a convolution layer, the src tensor must be linearized

    // Initialize output tensor for flatten
    auto_init_if_empty(_flattened_src, src->clone()->set_tensor_shape(compute_flatten_shape(src)));

    _flatten = std::make_unique<CpuFlatten>();
    _flatten->configure(src, &_flattened_src);

    // Configure matrix multiply kernel
    configure_mm(&_flattened_src, weights, biases, dst, act);
}

void CpuFullyConnected::configure_fc_fc(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ActivationLayerInfo &act)
{
    ARM_COMPUTE_ERROR_ON(src->dimension(0) != weights->dimension(1));

    // Configure matrix multiply kernel
    configure_mm(src, weights, biases, dst, act);
}

void CpuFullyConnected::configure(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst,
                                  FullyConnectedLayerInfo fc_info)
{
    // Perform validate step
    ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);
    ARM_COMPUTE_ERROR_THROW_ON(CpuFullyConnected::validate(src,
                                                           weights,
                                                           biases != nullptr ? biases : nullptr,
                                                           dst,
                                                           fc_info));

    _needs_weights_conversion = false;
    _needs_weights_reshape    = fc_info.transpose_weights ? !fc_info.are_weights_reshaped : false;
    _needs_weights_reshape    = _needs_weights_reshape && !fc_info.retain_internal_weights;
    _is_fc_after_conv         = true;
    _is_quantized_asymmetric  = is_data_type_quantized_asymmetric(src->data_type());
    _is_prepared              = false;
    _trans_weights_idx        = AuxTensorIdx::Count;

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    const ITensorInfo *weights_to_use = weights;

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = dst->dimension(1) > 1;
    if(is_batched_fc_layer)
    {
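        // The src comes from a convolution if its dimensions beyond the third match the batch dimensions of the dst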
        _is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(src->tensor_shape().cbegin() + 3,
                                                                                  src->tensor_shape().cend(),
                                                                                  dst->tensor_shape().cbegin() + 1));
    }
    else
    {
        _is_fc_after_conv = src->num_dimensions() > 1;
    }

    // Reshape weights if needed
    if(_needs_weights_reshape)
    {
        // Reshape the weights
        _transpose_weights = std::make_unique<kernels::CpuTransposeKernel>();
        _transpose_weights->configure(weights, &_reshaped_weights);
        weights_to_use     = &_reshaped_weights;
        _trans_weights_idx = AuxTensorIdx::TransposedWeights;
    }

    // Convert weights if needed
    if(_is_fc_after_conv && (src->data_layout() != fc_info.weights_trained_layout))
    {
        // Convert weights
        _convert_weights = std::make_unique<CpuConvertFullyConnectedWeights>();
        _convert_weights->configure(weights_to_use,
                                    &_converted_weights,
                                    src->tensor_shape(),
                                    fc_info.weights_trained_layout);

        weights_to_use            = &_converted_weights;
        _needs_weights_conversion = true;
        _trans_weights_idx        = AuxTensorIdx::ConvertedWeights;
    }

    if(_is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        configure_conv_fc(src, weights_to_use, biases, dst, fc_info.activation_info);
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        configure_fc_fc(src, weights_to_use, biases, dst, fc_info.activation_info);
    }

    // Retain the tensorinfo with the weights to use
    if(_needs_weights_reshape || _needs_weights_conversion)
    {
        _trans_weights = *weights_to_use;
    }

    // Set auxiliary memory requirements
    auto gemm_mem_req = (_is_quantized_asymmetric) ? _mm_gemmlowp->workspace() : _mm_gemm->workspace();
    for(unsigned int i = 0; i < gemm_mem_req.size(); ++i)
    {
        _aux_mem[i] = gemm_mem_req[i];
    }

    if(_aux_mem[Pretranspose].size > 0)
    {
        // Release permuted weights at the end of prepare as they are further transposed by the assembly dispatch
        _aux_mem[TransposedWeights] = MemoryInfo(offset_int_vec(TransposedWeights), MemoryLifetime::Prepare, _reshaped_weights.total_size());
        _aux_mem[ConvertedWeights]  = MemoryInfo(offset_int_vec(ConvertedWeights), MemoryLifetime::Prepare, _converted_weights.total_size());
    }
    else
    {
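        // Without an assembly pretranspose the reshaped weights are only an intermediate step when a
        // conversion follows; otherwise they must persist for every run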
        _aux_mem[TransposedWeights] = MemoryInfo(offset_int_vec(TransposedWeights), _needs_weights_conversion ? MemoryLifetime::Prepare : MemoryLifetime::Persistent, _reshaped_weights.total_size());
        _aux_mem[ConvertedWeights]  = MemoryInfo(offset_int_vec(ConvertedWeights), MemoryLifetime::Persistent, _converted_weights.total_size());
    }
    _aux_mem[FlattenedSrc] = MemoryInfo(offset_int_vec(FlattenedSrc), MemoryLifetime::Temporary, _flattened_src.total_size());
}

Status CpuFullyConnected::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
                                   FullyConnectedLayerInfo fc_info)
{
    ARM_COMPUTE_UNUSED(fc_info.retain_internal_weights);
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, dst);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, weights, dst);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);
    ARM_COMPUTE_RETURN_ERROR_ON(biases != nullptr && biases->num_dimensions() > 1);
    ARM_COMPUTE_RETURN_ERROR_ON(fc_info.activation_info.enabled() && is_data_type_quantized(src->data_type()) && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::RELU
                                && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::BOUNDED_RELU && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(!fc_info.constant_weights, "Non-constant weights are currently not supported");

    bool weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    bool is_fc_after_conv = true;

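    // Mirror the intermediate tensor infos that configure() would create so each stage can be validated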
    const ITensorInfo &flatten_src       = TensorInfo(src->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(src)));
    const ITensorInfo &reshaped_weights  = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights)));
    const ITensorInfo &converted_weights = weights_reshaped ? TensorInfo(weights->clone()->set_is_resizable(true).reset_padding()) : TensorInfo(*reshaped_weights.clone());

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    const ITensorInfo *src_to_use     = src;
    const ITensorInfo *weights_to_use = weights;

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = dst->dimension(1) > 1;

    if(is_batched_fc_layer)
    {
        is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(src->tensor_shape().cbegin() + 3,
                                                                                 src->tensor_shape().cend(),
                                                                                 dst->tensor_shape().cbegin() + 1));
    }
    else
    {
        is_fc_after_conv = src->num_dimensions() > 1;
    }

    if(!weights_reshaped)
    {
        // Validate reshape weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuTransposeKernel::validate(weights, &reshaped_weights));
        weights_to_use = &reshaped_weights;
    }

    if(is_fc_after_conv && (src->data_layout() != fc_info.weights_trained_layout))
    {
        // Validate convert weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CpuConvertFullyConnectedWeights::validate(weights_to_use,
                                                                              &converted_weights,
                                                                              src->tensor_shape(),
                                                                              fc_info.weights_trained_layout));
        weights_to_use = &converted_weights;
    }

    if(is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(1) != (src->dimension(0) * src->dimension(1) * src->dimension(2))));

        // Validate flatten kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CpuFlatten::validate(src, &flatten_src));
        src_to_use = &flatten_src;
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) != weights_to_use->dimension(1));
    }
    // Validate matrix multiply kernel
    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(src_to_use, weights_to_use, biases, dst, fc_info.activation_info));

    return Status{};
}

void CpuFullyConnected::run(ITensorPack &tensors)
{
    prepare(tensors);

    auto src = tensors.get_const_tensor(ACL_SRC_0);

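    // Bind the auxiliary workspace buffers supplied through the tensor pack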
    CpuAuxTensorHandler flattened_src(offset_int_vec(FlattenedSrc), _flattened_src, tensors, false);
    CpuAuxTensorHandler transformed_wei(offset_int_vec(_trans_weights_idx), _trans_weights, tensors, false);

    // Linearize src if it comes from a convolutional layer
    if(_is_fc_after_conv)
    {
        ITensorPack flatten_pack{ { ACL_SRC, src }, { ACL_DST, flattened_src.get() } };
        _flatten->run(flatten_pack);
    }

    ITensorPack gemm_pack = tensors;
    gemm_pack.add_const_tensor(ACL_SRC_0, (_is_fc_after_conv) ? flattened_src.get() : src);
    if(_needs_weights_reshape || _needs_weights_conversion)
    {
        gemm_pack.add_const_tensor(ACL_SRC_1, transformed_wei.get());
    }

    // Run matrix multiply
    if(_is_quantized_asymmetric)
    {
        _mm_gemmlowp->run(gemm_pack);
    }
    else
    {
        _mm_gemm->run(gemm_pack);
    }
}

void CpuFullyConnected::prepare(ITensorPack &tensors)
{
    if(!_is_prepared)
    {
        auto weights = tensors.get_const_tensor(ACL_SRC_1);

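        // Acquire the weight transformation scratch buffers from the workspace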
        CpuAuxTensorHandler reshaped_weights(offset_int_vec(TransposedWeights), _reshaped_weights, tensors, false);
        CpuAuxTensorHandler converted_weights(offset_int_vec(ConvertedWeights), _converted_weights, tensors, false);

        // Pointer to current weights
        const ITensor *cur_weights = weights;

        // Reshape of the weights (happens only once)
        if(_needs_weights_reshape)
        {
            // Run reshape weights kernel and mark weights as unused
            ITensorPack transpose_pack{ { ACL_SRC, weights }, { ACL_DST, reshaped_weights.get() } };
            NEScheduler::get().schedule_op(_transpose_weights.get(), Window::DimY, _transpose_weights->window(), transpose_pack);

            cur_weights->mark_as_unused();
            cur_weights = reshaped_weights.get();
        }

        // Convert weights if needed (happens only once)
        if(_needs_weights_conversion)
        {
            ITensorPack convert_pack{ { ACL_SRC, cur_weights }, { ACL_DST, converted_weights.get() } };
            _convert_weights->run(convert_pack);

            cur_weights->mark_as_unused();
            cur_weights = converted_weights.get();
        }

        ITensorPack gemm_pack = tensors;
        gemm_pack.add_const_tensor(ACL_SRC_1, cur_weights);

        // Run the GEMM prepare step and release unused weights
        if(!_is_quantized_asymmetric)
        {
            _mm_gemm->prepare(gemm_pack);
        }
        else
        {
            _mm_gemmlowp->prepare(gemm_pack);
        }

        _is_prepared = true;
    }
}

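// Expose the auxiliary memory requirements so the caller can allocate the workspace tensors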
experimental::MemoryRequirements CpuFullyConnected::workspace() const
{
    return _aux_mem;
}
} // namespace cpu
} // namespace arm_compute