/*
 * Copyright (c) 2021-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/cpu/operators/CpuGemm.h"

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/common/utils/Log.h"
#include "src/core/CPP/Validate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/MemoryHelpers.h"
#include "src/cpu/utils/CpuAuxTensorHandler.h"

using namespace arm_compute::experimental;
using namespace arm_compute::misc::shape_calculator;

namespace arm_compute
{
namespace cpu
{
namespace
{
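// Translate the user-facing GEMMInfo into the metadata consumed by the assembly dispatch.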
cpu::AsmGemmInfo init_assembly_metadata(const GEMMInfo &info)
{
    cpu::AsmGemmInfo asm_info;
    asm_info.method                  = cpu::AsmConvMethod::Im2Col;
    asm_info.reinterpret_input_as_3d = info.reinterpret_input_as_3d();
    asm_info.depth_output_gemm3d     = info.depth_output_gemm3d();
    asm_info.activation_info         = info.activation_info();
    asm_info.fast_mode               = info.fast_math();
    asm_info.fixed_format            = info.fixed_format();
    asm_info.weight_format           = info.weight_format();

    return asm_info;
}
} // namespace

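// Configure the operator to compute d = alpha * (a x b) + beta * c. The computation is
// either dispatched as a whole to the optimized assembly path, or decomposed into
// interleave/transpose, matrix-multiply, bias-add, matrix-add and activation stages.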
void CpuGemm::configure(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, ITensorInfo *d, float alpha, float beta, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, d);
    ARM_COMPUTE_ERROR_THROW_ON(CpuGemm::validate(a, b, c, d, alpha, beta, gemm_info));
    ARM_COMPUTE_LOG_PARAMS(a, b, c, d, alpha, beta, gemm_info);

    const cpu::AsmGemmInfo asm_info = init_assembly_metadata(gemm_info);
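    // c is consumed as a fused bias only when beta == 1; for any other non-trivial beta
    // it is handled by the separate matrix-addition kernel configured further down.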
    const bool is_c_bias = beta == 1 && c != nullptr;
    bool run_optimised = bool(cpu::CpuGemmAssemblyDispatch::validate(a, b, (is_c_bias) ? c : nullptr, d, asm_info)) &&
                         (c == nullptr || beta == 0.f || beta == 1.f) &&            // The optimized GEMM doesn't support beta coefficients other than 0 and 1.
                         !(!b->are_values_constant() && b->tensor_shape().z() > 1); // Disable batch matmul as the optimized GEMM handles batching differently.

    // Check if we need to reshape the matrix B only on the first run
    _is_prepared                      = false;
    _reshape_b_only_on_first_run      = b->are_values_constant();
    _run_vector_matrix_multiplication = a->dimension(1) < 2;
    _run_alpha_scale                  = alpha != 1.f;
    _run_bias_addition                = is_c_bias;
    _run_addition                     = beta != 0 && beta != 1 && c != nullptr;
    _run_activation = gemm_info.activation_info().enabled() && (!run_optimised || (run_optimised && !cpu::CpuGemmAssemblyDispatch::is_activation_supported(gemm_info.activation_info())));

    if(run_optimised)
    {
        const ITensorInfo *c_to_use = is_c_bias ? c : nullptr;
        _asm_glue                   = std::make_unique<cpu::CpuGemmAssemblyDispatch>();
        _asm_glue->configure(a, b, c_to_use, d, asm_info);
        ARM_COMPUTE_ERROR_ON(!_asm_glue->is_configured());

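        // Record the assembly dispatch's memory requirements so callers can allocate
        // the auxiliary buffers reported through workspace().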
        auto asm_mem_req           = _asm_glue->workspace();
        _aux_mem[AsmGemmWorkspace] = asm_mem_req[AsmGemmWorkspace];
        _aux_mem[Pretraspose]      = asm_mem_req[Pretraspose];

        // Scale the product by alpha
        if(_run_alpha_scale)
        {
            _alpha_scale_func = std::make_unique<cpu::CpuActivation>();
            _alpha_scale_func->configure(d, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, alpha, 0.f));
        }
    }
    else
    {
        // Pick the output tensor in case bias addition should be performed
        ITensorInfo *gemm_output_to_use = (_run_bias_addition) ? &_tmp_d : d;

        _mm_kernel = std::make_unique<cpu::kernels::CpuGemmMatrixMultiplyKernel>();

        // Select between GEMV and GEMM
        if(_run_vector_matrix_multiplication)
        {
            // Configure the matrix multiply kernel
            _mm_kernel->configure(a, b, gemm_output_to_use, alpha, false);
        }
        else
        {
            const int m = a->dimension(1);
            const int n = b->dimension(0);
            const int k = a->dimension(0);

            // Configure interleave kernel
            _interleave_kernel = std::make_unique<cpu::kernels::CpuGemmInterleave4x4Kernel>();
            _interleave_kernel->configure(a, &_tmp_a);
            _aux_mem[InterleavedLHS] = MemoryInfo(offset_int_vec(InterleavedLHS), MemoryLifetime::Temporary, _tmp_a.total_size());

            // Configure transpose kernel
            _transpose_kernel = std::make_unique<cpu::kernels::CpuGemmTranspose1xWKernel>();
            _transpose_kernel->configure(b, &_tmp_b);
            _aux_mem[TransposedRHS] = MemoryInfo(offset_int_vec(TransposedRHS), MemoryLifetime::Persistent, _tmp_b.total_size());

            // Configure matrix multiplication kernel
            _mm_kernel->configure(&_tmp_a, &_tmp_b, gemm_output_to_use, alpha, true, GEMMReshapeInfo(m, n, k));
        }

        if(_run_bias_addition)
        {
            _add_bias = std::make_unique<cpu::CpuAdd>();
            _add_bias->configure(gemm_output_to_use, c, d, ConvertPolicy::SATURATE);
            _aux_mem[TempResult] = MemoryInfo(offset_int_vec(TempResult), MemoryLifetime::Temporary, _tmp_d.total_size());
        }
    }

    // Configure matrix addition kernel
    if(_run_addition)
    {
        _ma_kernel = std::make_unique<cpu::kernels::CpuGemmMatrixAdditionKernel>();
        _ma_kernel->configure(c, d, beta);
    }

    // Configure activation
    if(_run_activation)
    {
        _activation_func = std::make_unique<cpu::CpuActivation>();
        _activation_func->configure(d, nullptr, gemm_info.activation_info());
    }
}

Status CpuGemm::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *d, float alpha, float beta, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_UNUSED(alpha);
    const bool is_c_bias    = beta == 1 && c != nullptr;
    const bool run_addition = c != nullptr && beta != 0 && beta != 1;

    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(a);
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(a);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::BFLOAT16, DataType::F16, DataType::F32);

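    // Fixed-format fast-math kernels expect F32 activations with weights already stored as BFLOAT16.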
    if(is_fixed_format_fast_math(gemm_info.weight_format()))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(a, DataType::F32);
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(b, DataType::BFLOAT16);
    }
    else
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
    }

    const int block_by = arm_compute::block_by(gemm_info.weight_format());
    // Test whether im2col has changed the dimensions that are needed for padding.
    if(a->dimension(0) != b->dimension(1) && block_by > 1)
    {
        // Verify that the im2col right-padding keeps A and B compatible.
        const size_t dim0_sz = a->dimension(0);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG((dim0_sz % block_by) != 0, ("The number of columns in matrix A must be a multiple of block_by=" + std::to_string(block_by)).c_str());
        // a->dimension(0) = kernel_area * input_channel + kernel_area * input_pad_right
        // b->dimension(1) = kernel_area * input_channel
        // => a->dimension(0) = b->dimension(1) + kernel_area * input_pad_right
        const size_t input_pad_right = (dim0_sz - b->dimension(1)) % block_by;
        const size_t kernel_area     = (dim0_sz - b->dimension(1)) / input_pad_right;
        ARM_COMPUTE_RETURN_ERROR_ON_MSG((dim0_sz - kernel_area * input_pad_right) != b->dimension(1), "The product AB is defined only if the number of columns in A and the number of rows in B are compatible");
    }
    else
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->dimension(0) != b->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
    }

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported");
    if(a->data_type() != DataType::BFLOAT16)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, d);
    }

    if(run_addition)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(gemm_info.depth_output_gemm3d() != 0);
        ARM_COMPUTE_RETURN_ERROR_ON(gemm_info.reinterpret_input_as_3d());
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(c, d);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->dimension(1) != c->dimension(1), "The C matrix must have the same number of rows as the matrix A");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(b->dimension(0) != c->dimension(0), "The C matrix must have the same number of columns as the matrix B");
    }

    if(d->total_size() != 0)
    {
        // For fixed format, B/RHS is stored in a blocked layout, so its dimensions won't necessarily match the result matrix.
        ARM_COMPUTE_RETURN_ERROR_ON(!gemm_info.fixed_format() && b->dimension(0) != d->dimension(0));
        if(gemm_info.depth_output_gemm3d() != 0)
        {
            if(gemm_info.reinterpret_input_as_3d())
            {
                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != d->dimension(1));
                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(2) != d->dimension(2));
            }
            else
            {
                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != d->dimension(1) * d->dimension(2));
            }
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != d->dimension(1));
        }
    }

    // Check if we need to run the optimized assembly kernel
    cpu::AsmGemmInfo asm_info = init_assembly_metadata(gemm_info);
    const bool run_optimised = bool(cpu::CpuGemmAssemblyDispatch::validate(a, b, is_c_bias ? c : nullptr, d, asm_info)) &&
                               (c == nullptr || beta == 0.f || beta == 1.f) &&            // The optimized GEMM doesn't support beta coefficients other than 0 and 1.
                               !(!b->are_values_constant() && b->tensor_shape().z() > 1); // Disable batch matmul as the optimized GEMM handles batching differently.

    if(!run_optimised)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.reinterpret_input_as_3d(), "CpuGemm cannot reinterpret the input tensor as 3D");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.depth_output_gemm3d() != 0, "CpuGemm cannot reinterpret the output tensor as 3D");

        // Check if the first input tensor is a vector.
        const bool run_vector_matrix_multiplication = a->dimension(1) < 2;
        // Check if we need to reshape the matrix A and matrix B
        const bool run_interleave_transpose = !run_vector_matrix_multiplication && !b->are_values_constant();

        // Arguments used by GEMMReshapeInfo.
        // If we pass matrix A and matrix B reshaped to CpuGemmMatrixMultiplyKernel, we need to pass m, n, k, mult_transpose1xW_width
        // and mult_interleave4x4_height to GEMMReshapeInfo so that it knows how the matrices have been reshaped.
        const int m                         = a->dimension(1);
        const int n                         = b->dimension(0);
        const int k                         = a->dimension(0);
        int       mult_transpose1xW_width   = 1;
        int       mult_interleave4x4_height = 1;

        const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(m, n, k, mult_transpose1xW_width, mult_interleave4x4_height, gemm_info.depth_output_gemm3d());

        const ITensorInfo *matrix_a_info = a;
        const ITensorInfo *matrix_b_info = b;

        TensorInfo tmp_a_info{};
        TensorInfo tmp_b_info{};
        TensorInfo tmp_output_info = *d->clone();

        if(run_interleave_transpose)
        {
            matrix_a_info = &tmp_a_info;
            matrix_b_info = &tmp_b_info;

            // Validate interleave kernel
            auto_init_if_empty(tmp_a_info, a->clone()->set_tensor_shape(compute_interleaved_shape(*a, mult_interleave4x4_height, gemm_info.reinterpret_input_as_3d())));
            ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmInterleave4x4Kernel::validate(a, &tmp_a_info));

            // Validate transpose kernel
            auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(compute_transpose1xW_with_element_size_shape(*b, mult_transpose1xW_width)));
            ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmTranspose1xWKernel::validate(b, &tmp_b_info));
        }

        // Validate matrix multiply
        auto_init_if_empty(tmp_output_info, matrix_a_info->clone()->set_tensor_shape(compute_mm_shape(*matrix_a_info, *matrix_b_info, run_interleave_transpose, reshape_info)));
        ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, &tmp_output_info, alpha, run_interleave_transpose, reshape_info));

        if(is_c_bias)
        {
            ARM_COMPUTE_RETURN_ON_ERROR(cpu::CpuAdd::validate(&tmp_output_info, c, d, ConvertPolicy::SATURATE));
        }
    }

    // Validate matrix addition kernel
    if(run_addition)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmMatrixAdditionKernel::validate(c, d, beta));
    }

    // Validate activation
    const ActivationLayerInfo &activation = gemm_info.activation_info();
    if(activation.enabled())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(cpu::CpuActivation::validate(d, nullptr, activation));
    }

    return Status{};
}

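// A minimal usage sketch (an illustration, not part of the library's documentation; the
// tensor names below are assumptions). The caller configures once, allocates the auxiliary
// buffers reported by workspace(), then runs with an ITensorPack:
//
//   CpuGemm gemm;
//   gemm.configure(&a_info, &b_info, nullptr, &d_info, 1.f, 0.f, GEMMInfo());
//   ITensorPack pack{ { ACL_SRC_0, &a }, { ACL_SRC_1, &b }, { ACL_DST, &d } };
//   // Auxiliary tensors from gemm.workspace() must also be added to the pack.
//   gemm.run(pack);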
void CpuGemm::run(ITensorPack &tensors)
{
    prepare(tensors);

    auto a = tensors.get_const_tensor(ACL_SRC_0);
    auto b = tensors.get_const_tensor(ACL_SRC_1);
    auto c = tensors.get_const_tensor(ACL_SRC_2);
    auto d = tensors.get_tensor(ACL_DST);

    if(_asm_glue && _asm_glue->is_configured())
    {
        // Pass c to the assembly dispatch only if it is the bias tensor
        ITensorPack asm_pack = tensors;
        asm_pack.add_const_tensor(ACL_SRC_2, _run_bias_addition ? c : nullptr);
        _asm_glue->run(asm_pack);
        if(_run_alpha_scale)
        {
            ITensorPack pack{ { ACL_SRC, d }, { ACL_DST, d } };
            _alpha_scale_func->run(pack);
        }
    }
    else
    {
        CpuAuxTensorHandler interleaved_a(offset_int_vec(InterleavedLHS), _tmp_a, tensors, true);
        CpuAuxTensorHandler transposed_b(offset_int_vec(TransposedRHS), _tmp_b, tensors, true);
        CpuAuxTensorHandler temp_d(offset_int_vec(TempResult), _tmp_d, tensors, true);

        ITensorPack mm_pack{ { ACL_SRC_0, a }, { ACL_SRC_1, b }, { ACL_DST, (_run_bias_addition) ? temp_d.get() : d } };
        if(!_run_vector_matrix_multiplication)
        {
            // Run interleave kernel
            ITensorPack interleave_pack{ { ACL_SRC, a }, { ACL_DST, interleaved_a.get() } };
            NEScheduler::get().schedule_op(_interleave_kernel.get(), Window::DimY, _interleave_kernel->window(), interleave_pack);

            if(!_reshape_b_only_on_first_run)
            {
                // Run transpose kernel
                ITensorPack transpose_pack{ { ACL_SRC, b }, { ACL_DST, transposed_b.get() } };
                NEScheduler::get().schedule_op(_transpose_kernel.get(), Window::DimY, _transpose_kernel->window(), transpose_pack);
            }

            // Use reshaped matrices
            mm_pack.add_const_tensor(ACL_SRC_0, interleaved_a.get());
            mm_pack.add_const_tensor(ACL_SRC_1, transposed_b.get());
        }

        NEScheduler::get().schedule_op(_mm_kernel.get(), _run_vector_matrix_multiplication ? Window::DimX : Window::DimY, _mm_kernel->window(), mm_pack);

        // Run bias addition kernel
        if(_run_bias_addition)
        {
            ITensorPack pack{ { ACL_SRC_0, temp_d.get() }, { ACL_SRC_1, c }, { ACL_DST, d } };
            _add_bias->run(pack);
        }
    }

    // Run matrix addition kernel
    if(_run_addition)
    {
        ITensorPack c_add_pack{ { ACL_SRC, c }, { ACL_DST, d } };
        NEScheduler::get().schedule_op(_ma_kernel.get(), Window::DimY, _ma_kernel->window(), c_add_pack);
    }

    // Run activation function
    if(_run_activation)
    {
        ITensorPack pack{ { ACL_SRC, d }, { ACL_DST, d } };
        _activation_func->run(pack);
    }
}

void CpuGemm::prepare(ITensorPack &tensors)
{
    if(!_is_prepared)
    {
        if(_asm_glue && _asm_glue->is_configured())
        {
            _asm_glue->prepare(tensors);
        }
        else if(_reshape_b_only_on_first_run && !_run_vector_matrix_multiplication)
        {
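            // B is constant, so transpose it once here; the result lives in the
            // persistent TransposedRHS auxiliary buffer and is reused on every run.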
            const ITensor *b     = tensors.get_const_tensor(ACL_SRC_1);
            ITensor       *b_aux = utils::cast::polymorphic_cast<ITensor *>(tensors.get_tensor(offset_int_vec(TransposedRHS)));
            ARM_COMPUTE_ERROR_ON_NULLPTR(b, b_aux);

            CpuAuxTensorHandler transposed_b(_tmp_b, *b_aux);
            ITensorPack transpose_pack{ { ACL_SRC, b }, { ACL_DST, transposed_b.get() } };
            NEScheduler::get().schedule_op(_transpose_kernel.get(), Window::DimY, _transpose_kernel->window(), transpose_pack);
        }
        _is_prepared = true;
    }
}

experimental::MemoryRequirements CpuGemm::workspace() const
{
    return _aux_mem;
}

Status CpuGemm::has_opt_impl(arm_compute::WeightFormat &expected_weight_format, const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *d,
                             const GEMMInfo &gemm_info)
{
    const cpu::AsmGemmInfo asm_info = init_assembly_metadata(gemm_info);

    return CpuGemmAssemblyDispatch::has_opt_impl(expected_weight_format, a, b, c, d, asm_info);
}

bool CpuGemm::isVarWeightsKernel() const
{
    return _asm_glue && _asm_glue->isVarWeightsKernel();
}
} // namespace cpu
} // namespace arm_compute