/*
 * Copyright (c) 2021-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/cpu/operators/CpuGemm.h"

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"

#include "src/common/utils/Log.h"
#include "src/core/CPP/Validate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/MemoryHelpers.h"
#include "src/cpu/utils/CpuAuxTensorHandler.h"

using namespace arm_compute::experimental;
using namespace arm_compute::misc::shape_calculator;

namespace arm_compute
{
namespace cpu
{
namespace
{
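// Translate the operator-level GEMMInfo into the metadata consumed by the
// assembly kernel dispatch (cpu::CpuGemmAssemblyDispatch).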
cpu::AsmGemmInfo init_assembly_metadata(const GEMMInfo &info)
{
    cpu::AsmGemmInfo asm_info;
    asm_info.method                  = cpu::AsmConvMethod::Im2Col;
    asm_info.reinterpret_input_as_3d = info.reinterpret_input_as_3d();
    asm_info.depth_output_gemm3d     = info.depth_output_gemm3d();
    asm_info.activation_info         = info.activation_info();
    asm_info.fast_mode               = info.fast_math();
    asm_info.fixed_format            = info.fixed_format();
    asm_info.weight_format           = info.weight_format();

    return asm_info;
}
} // namespace

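// Typical usage (a sketch; shapes, types and the surrounding memory management
// are illustrative, not prescriptive):
//   TensorInfo a_info(TensorShape(K, M), 1, DataType::F32); // A: M x K
//   TensorInfo b_info(TensorShape(N, K), 1, DataType::F32); // B: K x N
//   TensorInfo d_info(TensorShape(N, M), 1, DataType::F32); // D: M x N
//   CpuGemm gemm;
//   gemm.configure(&a_info, &b_info, nullptr, &d_info, 1.f, 0.f, GEMMInfo());
//   // ... then bind ITensors for ACL_SRC_0/1 and ACL_DST in an ITensorPack,
//   // allocate the buffers reported by gemm.workspace(), and call gemm.run(pack).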
void CpuGemm::configure(const ITensorInfo *a,
                        const ITensorInfo *b,
                        const ITensorInfo *c,
                        ITensorInfo       *d,
                        float              alpha,
                        float              beta,
                        const GEMMInfo    &gemm_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, d);
    ARM_COMPUTE_ERROR_THROW_ON(CpuGemm::validate(a, b, c, d, alpha, beta, gemm_info));
    ARM_COMPUTE_LOG_PARAMS(a, b, c, d, alpha, beta, gemm_info);

    const cpu::AsmGemmInfo asm_info  = init_assembly_metadata(gemm_info);
    const bool             is_c_bias = beta == 1 && c != nullptr;
    bool                   run_optimised =
        bool(cpu::CpuGemmAssemblyDispatch::validate(a, b, (is_c_bias) ? c : nullptr, d, asm_info)) &&
        (c == nullptr || beta == 0.f || beta == 1.f) && // The optimized GeMM supports only beta of 0 or 1.
        !(!b->are_values_constant() &&
          b->tensor_shape().z() > 1); // Disable batched matmul, as the optimized GeMM handles batching differently.

    // Check if we need to reshape the matrix B only on the first run
    _is_prepared                      = false;
    _reshape_b_only_on_first_run      = b->are_values_constant();
    _run_vector_matrix_multiplication = a->dimension(1) < 2;
    _run_alpha_scale                  = alpha != 1.f;
    _run_bias_addition                = is_c_bias;
    _run_addition                     = beta != 0 && beta != 1 && c != nullptr;
    _run_activation =
        gemm_info.activation_info().enabled() &&
        (!run_optimised || !cpu::CpuGemmAssemblyDispatch::is_activation_supported(gemm_info.activation_info()));

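    // Fast path: delegate the whole GEMM (and the activation, when the assembly
    // backend supports fusing it) to the assembly dispatch.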
    if (run_optimised)
    {
        const ITensorInfo *c_to_use = is_c_bias ? c : nullptr;
        _asm_glue                   = std::make_unique<cpu::CpuGemmAssemblyDispatch>();
        _asm_glue->configure(a, b, c_to_use, d, asm_info);
        ARM_COMPUTE_ERROR_ON(!_asm_glue->is_configured());

        auto asm_mem_req           = _asm_glue->workspace();
        _aux_mem[AsmGemmWorkspace] = asm_mem_req[AsmGemmWorkspace];
        _aux_mem[Pretraspose]      = asm_mem_req[Pretraspose];

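        // Note: the assembly kernels compute D = A*B (+ bias); any alpha scaling
        // is applied afterwards as a LINEAR activation (d = alpha * d + 0).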
        // Scale product by alpha
        if (_run_alpha_scale)
        {
            _alpha_scale_func = std::make_unique<cpu::CpuActivation>();
            _alpha_scale_func->configure(
                d, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, alpha, 0.f));
        }
    }
    else
    {
        // Pick the output tensor to use in case bias addition should be performed
        ITensorInfo *gemm_output_to_use = (_run_bias_addition) ? &_tmp_d : d;

        _mm_kernel = std::make_unique<cpu::kernels::CpuGemmMatrixMultiplyKernel>();

        // Select between GEMV and GEMM
        if (_run_vector_matrix_multiplication)
        {
            // Configure the matrix multiply kernel
            _mm_kernel->configure(a, b, gemm_output_to_use, alpha, false);
        }
        else
        {
            const int m = a->dimension(1);
            const int n = b->dimension(0);
            const int k = a->dimension(0);

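            // Reshape both operands for the blocked multiply: A is interleaved
            // in 4x4 blocks and B is transposed into 1xW strips, so the kernel
            // can read both linearly.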
            // Configure interleave kernel
            _interleave_kernel = std::make_unique<cpu::kernels::CpuGemmInterleave4x4Kernel>();
            _interleave_kernel->configure(a, &_tmp_a);
            _aux_mem[InterleavedLHS] =
                MemoryInfo(offset_int_vec(InterleavedLHS), MemoryLifetime::Temporary, _tmp_a.total_size());

            // Configure transpose kernel
            _transpose_kernel = std::make_unique<cpu::kernels::CpuGemmTranspose1xWKernel>();
            _transpose_kernel->configure(b, &_tmp_b);
            _aux_mem[TransposedRHS] =
                MemoryInfo(offset_int_vec(TransposedRHS), MemoryLifetime::Persistent, _tmp_b.total_size());
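            // Note: TransposedRHS is persistent (not temporary) because, when B
            // is constant, the reshape runs once in prepare() and the result is
            // reused on every subsequent run.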

            // Configure matrix multiplication kernel
            _mm_kernel->configure(&_tmp_a, &_tmp_b, gemm_output_to_use, alpha, true, GEMMReshapeInfo(m, n, k));
        }

        if (_run_bias_addition)
        {
            _add_bias = std::make_unique<cpu::CpuAdd>();
            _add_bias->configure(gemm_output_to_use, c, d, ConvertPolicy::SATURATE);
            _aux_mem[TempResult] =
                MemoryInfo(offset_int_vec(TempResult), MemoryLifetime::Temporary, _tmp_d.total_size());
        }
    }

    // Configure matrix addition kernel
    if (_run_addition)
    {
        _ma_kernel = std::make_unique<cpu::kernels::CpuGemmMatrixAdditionKernel>();
        _ma_kernel->configure(c, d, beta);
    }

    // Configure activation
    if (_run_activation)
    {
        _activation_func = std::make_unique<cpu::CpuActivation>();
        _activation_func->configure(d, nullptr, gemm_info.activation_info());
    }
}

Status CpuGemm::validate(const ITensorInfo *a,
                         const ITensorInfo *b,
                         const ITensorInfo *c,
                         const ITensorInfo *d,
                         float              alpha,
                         float              beta,
                         const GEMMInfo    &gemm_info)
{
    ARM_COMPUTE_UNUSED(alpha);
    const bool is_c_bias    = beta == 1 && c != nullptr;
    const bool run_addition = c != nullptr && beta != 0 && beta != 1;

    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(a);
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(a);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::BFLOAT16, DataType::F16, DataType::F32);

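    // Fixed-format fast-math kernels take F32 activations with weights already
    // converted to BFLOAT16, so A and B are not required to share a data type.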
    if (is_fixed_format_fast_math(gemm_info.weight_format()))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(a, DataType::F32);
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(b, DataType::BFLOAT16);
    }
    else
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
    }

    const int block_by = arm_compute::block_by(gemm_info.weight_format());
    // Test whether im2col has changed the dimensions that are needed for padding.
    if (a->dimension(0) != b->dimension(1) && block_by > 1)
    {
        // Have to verify the im2col right-padding.
        const size_t dim0_sz = a->dimension(0);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(
            (dim0_sz % block_by) != 0,
            ("The number of columns in matrix A must be a multiple of block_by=" + std::to_string(block_by)).c_str());
        // a->dimension(0) = kernel_area * input_channel + kernel_area * input_pad_right
        // b->dimension(1) = kernel_area * input_channel
        // a->dimension(0) = b->dimension(1) + kernel_area * input_pad_right
        const size_t input_pad_right = (dim0_sz - b->dimension(1)) % block_by;
        const size_t kernel_area     = (dim0_sz - b->dimension(1)) / input_pad_right;
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(
            (dim0_sz - kernel_area * input_pad_right) != b->dimension(1),
            "The product AB is defined only if the number of columns in A matches the number of rows in B (up to the im2col right-padding)");
    }
    else
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(
            a->dimension(0) != b->dimension(1),
            "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
    }

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported");
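    // A BFLOAT16 input produces an F32 result, so the A/D type match is only
    // enforced for non-BF16 inputs.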
    if (a->data_type() != DataType::BFLOAT16)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, d);
    }

    if (run_addition)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(gemm_info.depth_output_gemm3d() != 0);
        ARM_COMPUTE_RETURN_ERROR_ON(gemm_info.reinterpret_input_as_3d());
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(c, d);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->dimension(1) != c->dimension(1),
                                        "The C matrix must have the same number of rows as the matrix A");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(b->dimension(0) != c->dimension(0),
                                        "The C matrix must have the same number of columns as the matrix B");
    }

    if (d->total_size() != 0)
    {
        // For fixed format we are expecting some kind of blocked format for B/RHS, so the dimension won't necessarily match the result matrix any more.
        ARM_COMPUTE_RETURN_ERROR_ON(!gemm_info.fixed_format() && b->dimension(0) != d->dimension(0));
        if (gemm_info.depth_output_gemm3d() != 0)
        {
            if (gemm_info.reinterpret_input_as_3d())
            {
                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != d->dimension(1));
                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(2) != d->dimension(2));
            }
            else
            {
                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != d->dimension(1) * d->dimension(2));
            }
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != d->dimension(1));
        }
    }

    // Check if we need to run the optimized assembly kernel
    cpu::AsmGemmInfo asm_info = init_assembly_metadata(gemm_info);
    const bool       run_optimised =
        bool(cpu::CpuGemmAssemblyDispatch::validate(a, b, is_c_bias ? c : nullptr, d, asm_info)) &&
        (c == nullptr || beta == 0.f || beta == 1.f) && // The optimized GeMM supports only beta of 0 or 1.
        !(!b->are_values_constant() &&
          b->tensor_shape().z() > 1); // Disable batched matmul, as the optimized GeMM handles batching differently.

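    // The checks below validate the generic (non-assembly) kernel chain and
    // mirror the kernels set up in configure().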
    if (!run_optimised)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.reinterpret_input_as_3d(),
                                        "CpuGemm cannot reinterpret the input tensor as 3D");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.depth_output_gemm3d() != 0,
                                        "CpuGemm cannot reinterpret the output tensor as 3D");

        // Check if the first input tensor is a vector.
        const bool run_vector_matrix_multiplication = a->dimension(1) < 2;
        // Check if we need to reshape the matrix A and matrix B
        const bool run_interleave_transpose = !run_vector_matrix_multiplication && !b->are_values_constant();

        // Arguments used by GEMMReshapeInfo
        // If we pass the matrix A and matrix B reshaped to CpuGemmMatrixMultiplyKernel, we need to pass m, n, k, mult_transpose1xW_width and mult_interleave4x4_height to GEMMReshapeInfo
        // in order to know how the matrices have been reshaped
        const int m                         = a->dimension(1);
        const int n                         = b->dimension(0);
        const int k                         = a->dimension(0);
        int       mult_transpose1xW_width   = 1;
        int       mult_interleave4x4_height = 1;

        const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(
            m, n, k, mult_transpose1xW_width, mult_interleave4x4_height, gemm_info.depth_output_gemm3d());

        const ITensorInfo *matrix_a_info = a;
        const ITensorInfo *matrix_b_info = b;

        TensorInfo tmp_a_info{};
        TensorInfo tmp_b_info{};
        TensorInfo tmp_output_info = *d->clone();

        if (run_interleave_transpose)
        {
            matrix_a_info = &tmp_a_info;
            matrix_b_info = &tmp_b_info;

            // Validate interleave kernel
            auto_init_if_empty(tmp_a_info, a->clone()->set_tensor_shape(compute_interleaved_shape(
                                               *a, mult_interleave4x4_height, gemm_info.reinterpret_input_as_3d())));
            ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmInterleave4x4Kernel::validate(a, &tmp_a_info));

            // Validate transpose kernel
            auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(compute_transpose1xW_with_element_size_shape(
                                               *b, mult_transpose1xW_width)));
            ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmTranspose1xWKernel::validate(b, &tmp_b_info));
        }

        // Validate matrix multiply
        auto_init_if_empty(tmp_output_info,
                           matrix_a_info->clone()->set_tensor_shape(compute_mm_shape(
                               *matrix_a_info, *matrix_b_info, run_interleave_transpose, reshape_info)));
        ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmMatrixMultiplyKernel::validate(
            matrix_a_info, matrix_b_info, &tmp_output_info, alpha, run_interleave_transpose, reshape_info));

        if (is_c_bias)
        {
            ARM_COMPUTE_RETURN_ON_ERROR(cpu::CpuAdd::validate(&tmp_output_info, c, d, ConvertPolicy::SATURATE));
        }
    }

    // Validate matrix addition kernel
    if (run_addition)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmMatrixAdditionKernel::validate(c, d, beta));
    }

    // Validate activation
    const ActivationLayerInfo &activation = gemm_info.activation_info();
    if (activation.enabled())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(cpu::CpuActivation::validate(d, nullptr, activation));
    }

    return Status{};
}

void CpuGemm::run(ITensorPack &tensors)
{
    prepare(tensors);

    auto a = tensors.get_const_tensor(ACL_SRC_0);
    auto b = tensors.get_const_tensor(ACL_SRC_1);
    auto c = tensors.get_const_tensor(ACL_SRC_2);
    auto d = tensors.get_tensor(ACL_DST);

    if (_asm_glue && _asm_glue->is_configured())
    {
        // Pass c to asm dispatch only if it's the bias tensor
        ITensorPack asm_pack = tensors;
        asm_pack.add_const_tensor(ACL_SRC_2, _run_bias_addition ? c : nullptr);
        _asm_glue->run(asm_pack);
        if (_run_alpha_scale)
        {
            ITensorPack pack{{ACL_SRC, d}, {ACL_DST, d}};
            _alpha_scale_func->run(pack);
        }
    }
    else
    {
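        // Bind the reshape/temporary workspace buffers for this run from the
        // tensor pack populated by the caller.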
        CpuAuxTensorHandler interleaved_a(offset_int_vec(InterleavedLHS), _tmp_a, tensors, true);
        CpuAuxTensorHandler transposed_b(offset_int_vec(TransposedRHS), _tmp_b, tensors, true);
        CpuAuxTensorHandler temp_d(offset_int_vec(TempResult), _tmp_d, tensors, true);

        ITensorPack mm_pack{{ACL_SRC_0, a}, {ACL_SRC_1, b}, {ACL_DST, (_run_bias_addition) ? temp_d.get() : d}};
        if (!_run_vector_matrix_multiplication)
        {
            // Run interleave kernel
            ITensorPack interleave_pack{{ACL_SRC, a}, {ACL_DST, interleaved_a.get()}};
            NEScheduler::get().schedule_op(_interleave_kernel.get(), Window::DimY, _interleave_kernel->window(),
                                           interleave_pack);

            if (!_reshape_b_only_on_first_run)
            {
                // Run transpose kernel
                ITensorPack transpose_pack{{ACL_SRC, b}, {ACL_DST, transposed_b.get()}};
                NEScheduler::get().schedule_op(_transpose_kernel.get(), Window::DimY, _transpose_kernel->window(),
                                               transpose_pack);
            }

            // Use reshaped matrices
            mm_pack.add_const_tensor(ACL_SRC_0, interleaved_a.get());
            mm_pack.add_const_tensor(ACL_SRC_1, transposed_b.get());
        }

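        // GEMV splits work along the X dimension (a single output row); the
        // general case splits along Y.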
        NEScheduler::get().schedule_op(_mm_kernel.get(),
                                       _run_vector_matrix_multiplication ? Window::DimX : Window::DimY,
                                       _mm_kernel->window(), mm_pack);

        // Run bias addition kernel
        if (_run_bias_addition)
        {
            ITensorPack pack{{ACL_SRC_0, temp_d.get()}, {ACL_SRC_1, c}, {ACL_DST, d}};
            _add_bias->run(pack);
        }
    }

    // Run matrix addition kernel
    if (_run_addition)
    {
        ITensorPack c_add_pack{{ACL_SRC, c}, {ACL_DST, d}};
        NEScheduler::get().schedule_op(_ma_kernel.get(), Window::DimY, _ma_kernel->window(), c_add_pack);
    }

    // Run activation function
    if (_run_activation)
    {
        ITensorPack pack{{ACL_SRC, d}, {ACL_DST, d}};
        _activation_func->run(pack);
    }
}

void CpuGemm::prepare(ITensorPack &tensors)
{
    if (!_is_prepared)
    {
        if (_asm_glue && _asm_glue->is_configured())
        {
            _asm_glue->prepare(tensors);
        }
        else if (_reshape_b_only_on_first_run && !_run_vector_matrix_multiplication)
        {
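            // B is constant: run the 1xW transpose once here and keep the result
            // in the persistent TransposedRHS buffer for subsequent runs.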
            const ITensor *b = tensors.get_const_tensor(ACL_SRC_1);
            ITensor       *b_aux =
                utils::cast::polymorphic_cast<ITensor *>(tensors.get_tensor(offset_int_vec(TransposedRHS)));
            ARM_COMPUTE_ERROR_ON_NULLPTR(b, b_aux);

            CpuAuxTensorHandler transposed_b(_tmp_b, *b_aux);
            ITensorPack         transpose_pack{{ACL_SRC, b}, {ACL_DST, transposed_b.get()}};
            NEScheduler::get().schedule_op(_transpose_kernel.get(), Window::DimY, _transpose_kernel->window(),
                                           transpose_pack);
        }
        _is_prepared = true;
    }
}

experimental::MemoryRequirements CpuGemm::workspace() const
{
    return _aux_mem;
}

Status CpuGemm::has_opt_impl(arm_compute::WeightFormat &expected_weight_format,
                             const ITensorInfo         *a,
                             const ITensorInfo         *b,
                             const ITensorInfo         *c,
                             const ITensorInfo         *d,
                             const GEMMInfo            &gemm_info)
{
    const cpu::AsmGemmInfo asm_info = init_assembly_metadata(gemm_info);

    return CpuGemmAssemblyDispatch::has_opt_impl(expected_weight_format, a, b, c, d, asm_info);
}

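// Variable-weight (fixed-format) kernels are only available through the
// assembly dispatch, so this reports false when no asm kernel is configured.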
bool CpuGemm::isVarWeightsKernel() const
{
    return _asm_glue && _asm_glue->isVarWeightsKernel();
}
} // namespace cpu
} // namespace arm_compute