/*
 * Copyright (c) 2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/runtime/cpu/operators/CpuGemm.h"

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/CPP/Validate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/MemoryHelpers.h"
#include "src/runtime/cpu/utils/CpuAuxTensorHandler.h"

using namespace arm_compute::experimental;
using namespace arm_compute::misc::shape_calculator;

namespace arm_compute
{
namespace cpu
{
namespace
{
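// Translate the operator-level GEMMInfo into the metadata consumed by the
// assembly dispatch (CpuGemmAssemblyDispatch). Only the fields relevant to
// the assembly path are copied here.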
cpu::AsmGemmInfo init_assembly_metadata(const GEMMInfo &info)
{
    cpu::AsmGemmInfo asm_info;
    asm_info.method                  = cpu::AsmConvMethod::Im2Col;
    asm_info.reinterpret_input_as_3d = info.reinterpret_input_as_3d();
    asm_info.depth_output_gemm3d     = info.depth_output_gemm3d();
    asm_info.activation_info         = info.activation_info();

    return asm_info;
}
} // namespace

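// Configure the GEMM operator to compute d = alpha * a * b + beta * c.
// Two execution paths are set up here:
//  - an optimized assembly path (CpuGemmAssemblyDispatch), used whenever it
//    supports the given shapes/data types;
//  - a reference path built from the interleave, transpose and matrix-multiply
//    kernels, with optional bias addition, matrix addition and activation.
//
// Minimal usage sketch (illustrative only; the shapes and the surrounding
// memory management are hypothetical and not part of this file):
//
//   CpuGemm    gemm;
//   TensorInfo a(TensorShape(8U, 4U), 1, DataType::F32);  // M=4, K=8
//   TensorInfo b(TensorShape(16U, 8U), 1, DataType::F32); // K=8, N=16
//   TensorInfo d(TensorShape(16U, 4U), 1, DataType::F32); // M=4, N=16
//   gemm.configure(&a, &b, nullptr, &d, 1.f, 0.f, GEMMInfo());
//   // ... then bind backing ITensors into an ITensorPack and call gemm.run(pack)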
void CpuGemm::configure(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, ITensorInfo *d, float alpha, float beta, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, d);
    ARM_COMPUTE_ERROR_THROW_ON(CpuGemm::validate(a, b, c, d, alpha, beta, gemm_info));

    const cpu::AsmGemmInfo asm_info      = init_assembly_metadata(gemm_info);
    const bool             is_c_bias     = gemm_info.reshape_b_only_on_first_run();
    bool                   run_optimised = bool(cpu::CpuGemmAssemblyDispatch::validate(a, b, (is_c_bias) ? c : nullptr, d, asm_info));

    // Check if we need to reshape the matrix B only on the first run
    _is_prepared                      = false;
    _reshape_b_only_on_first_run      = gemm_info.reshape_b_only_on_first_run();
    _run_vector_matrix_multiplication = a->dimension(1) < 2;
    _run_alpha_scale                  = alpha != 1.f;
    _run_bias_addition                = c != nullptr && gemm_info.reshape_b_only_on_first_run();
    _run_addition                     = beta != 0 && c != nullptr && !gemm_info.reshape_b_only_on_first_run();
    _run_activation                   = gemm_info.activation_info().enabled()
                                        && (!run_optimised || (run_optimised && !cpu::CpuGemmAssemblyDispatch::is_activation_supported(gemm_info.activation_info())));

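    // Assembly path: the dispatch runs the whole GEMM. alpha is not consumed by
    // the assembly kernels, so when alpha != 1 the product is scaled afterwards
    // with a LINEAR activation (y = alpha * x).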
    if(run_optimised)
    {
        const ITensorInfo *c_to_use = is_c_bias ? c : nullptr;
        _asm_glue                   = std::make_unique<cpu::CpuGemmAssemblyDispatch>();
        _asm_glue->configure(a, b, c_to_use, d, asm_info);
        ARM_COMPUTE_ERROR_ON(!_asm_glue->is_configured());

        auto asm_mem_req           = _asm_glue->workspace();
        _aux_mem[AsmGemmWorkspace] = asm_mem_req[AsmGemmWorkspace];
        _aux_mem[Pretraspose]      = asm_mem_req[Pretraspose];

        // Scale product by alpha
        if(_run_alpha_scale)
        {
            _alpha_scale_func = std::make_unique<cpu::CpuActivation>();
            _alpha_scale_func->configure(d, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, alpha, 0.f));
        }
    }
    else
    {
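        // Reference path: run the matrix-multiply kernel either directly (GEMV
        // case) or on reshaped operands (GEMM case), then add the bias with
        // CpuAdd if requested.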
        // Pick output tensor in case bias addition should be performed
        ITensorInfo *gemm_output_to_use = (_run_bias_addition) ? &_tmp_d : d;

        _mm_kernel = std::make_unique<cpu::kernels::CpuGemmMatrixMultiplyKernel>();

        // Select between GEMV and GEMM
        if(_run_vector_matrix_multiplication)
        {
            // Configure the matrix multiply kernel
            _mm_kernel->configure(a, b, gemm_output_to_use, alpha, false);
        }
        else
        {
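            // General matrix multiply: A is interleaved in 4x4 blocks and B is
            // transposed 1xW so that CpuGemmMatrixMultiplyKernel operates on
            // cache-friendly, reshaped operands.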
            const int m = a->dimension(1);
            const int n = b->dimension(0);
            const int k = a->dimension(0);

            // Configure interleave kernel
            _interleave_kernel = std::make_unique<cpu::kernels::CpuGemmInterleave4x4Kernel>();
            _interleave_kernel->configure(a, &_tmp_a);
            _aux_mem[InterleavedLHS] = MemoryInfo(offset_int_vec(InterleavedLHS), MemoryLifetime::Temporary, _tmp_a.total_size());

            // Configure transpose kernel
            _transpose_kernel = std::make_unique<cpu::kernels::CpuGemmTranspose1xWKernel>();
            _transpose_kernel->configure(b, &_tmp_b);
            _aux_mem[TransposedRHS] = MemoryInfo(offset_int_vec(TransposedRHS), MemoryLifetime::Persistent, _tmp_b.total_size());

            // Configure matrix multiplication kernel
            _mm_kernel->configure(&_tmp_a, &_tmp_b, gemm_output_to_use, alpha, true, GEMMReshapeInfo(m, n, k));
        }

        if(_run_bias_addition)
        {
            _add_bias = std::make_unique<cpu::CpuAdd>();
            _add_bias->configure(gemm_output_to_use, c, d, ConvertPolicy::SATURATE);
            _aux_mem[TempResult] = MemoryInfo(offset_int_vec(TempResult), MemoryLifetime::Persistent, _tmp_d.total_size());
        }
    }

    // Configure matrix addition kernel
    if(_run_addition)
    {
        _ma_kernel = std::make_unique<cpu::kernels::CpuGemmMatrixAdditionKernel>();
        _ma_kernel->configure(c, d, beta);
    }

    // Configure activation
    if(_run_activation)
    {
        _activation_func = std::make_unique<cpu::CpuActivation>();
        _activation_func->configure(d, nullptr, gemm_info.activation_info());
    }
}

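// Static validation: mirrors configure() without creating kernels or allocating
// memory. Returns an error Status if the given tensor infos / GEMMInfo cannot be
// handled by either the assembly path or the reference kernel path.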
Status CpuGemm::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *d, float alpha, float beta, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_UNUSED(alpha);
    const bool is_c_bias = gemm_info.reshape_b_only_on_first_run();

    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(a);
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(a);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::BFLOAT16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->dimension(0) != b->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported");
    if(a->data_type() != DataType::BFLOAT16)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, d);
    }

    if(c != nullptr && !is_c_bias)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(gemm_info.depth_output_gemm3d() != 0);
        ARM_COMPUTE_RETURN_ERROR_ON(gemm_info.reinterpret_input_as_3d());
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(c, d);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->dimension(1) != c->dimension(1), "The C matrix must have the same number of rows as the matrix A");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(b->dimension(0) != c->dimension(0), "The C matrix must have the same number of columns as the matrix B");
    }

    if(d->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(b->dimension(0) != d->dimension(0));
        if(gemm_info.depth_output_gemm3d() != 0)
        {
            if(gemm_info.reinterpret_input_as_3d())
            {
                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != d->dimension(1));
                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(2) != d->dimension(2));
            }
            else
            {
                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != d->dimension(1) * d->dimension(2));
            }
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != d->dimension(1));
        }
    }

    // Check if we need to run the optimized assembly kernel
    cpu::AsmGemmInfo asm_info      = init_assembly_metadata(gemm_info);
    const bool       run_optimised = bool(cpu::CpuGemmAssemblyDispatch::validate(a, b, is_c_bias ? c : nullptr, d, asm_info));

    if(!run_optimised)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.reinterpret_input_as_3d(), "CpuGemm cannot reinterpret the input tensor as 3D");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.depth_output_gemm3d() != 0, "CpuGemm cannot reinterpret the output tensor as 3D");

        // Check if the first input tensor is a vector.
        const bool run_vector_matrix_multiplication = a->dimension(1) < 2;
        // Check if we need to reshape the matrix A and matrix B
        const bool run_interleave_transpose = !run_vector_matrix_multiplication && !(gemm_info.reshape_b_only_on_first_run());

        // Arguments used by GEMMReshapeInfo
        // If we pass the matrix A and matrix B reshaped to CpuGemmMatrixMultiplyKernel, we need to pass m, n, k, mult_transpose1xW_width and mult_interleave4x4_height to GEMMReshapeInfo
        // in order to know how the matrices have been reshaped
        const int m                         = a->dimension(1);
        const int n                         = b->dimension(0);
        const int k                         = a->dimension(0);
        int       mult_transpose1xW_width   = 1;
        int       mult_interleave4x4_height = 1;

        const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(m, n, k, mult_transpose1xW_width, mult_interleave4x4_height, gemm_info.depth_output_gemm3d());

        const ITensorInfo *matrix_a_info = a;
        const ITensorInfo *matrix_b_info = b;

        TensorInfo tmp_a_info{};
        TensorInfo tmp_b_info{};
        TensorInfo tmp_output_info = *d->clone();

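        // When the inputs are reshaped, the validation below runs against
        // auto-initialized TensorInfos describing the interleaved A and the
        // 1xW-transposed B, mirroring what configure() sets up.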
        if(run_interleave_transpose)
        {
            matrix_a_info = &tmp_a_info;
            matrix_b_info = &tmp_b_info;

            // Validate interleave kernel
            auto_init_if_empty(tmp_a_info, a->clone()->set_tensor_shape(compute_interleaved_shape(*a, mult_interleave4x4_height, gemm_info.reinterpret_input_as_3d())));
            ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmInterleave4x4Kernel::validate(a, &tmp_a_info));

            // Validate transpose kernel
            auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(compute_transpose1xW_with_element_size_shape(*b, mult_transpose1xW_width)));
            ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmTranspose1xWKernel::validate(b, &tmp_b_info));
        }

        // Validate matrix multiply
        auto_init_if_empty(tmp_output_info, matrix_a_info->clone()->set_tensor_shape(compute_mm_shape(*matrix_a_info, *matrix_b_info, run_interleave_transpose, reshape_info)));
        ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, &tmp_output_info, alpha, run_interleave_transpose, reshape_info));

        if(c != nullptr && gemm_info.reshape_b_only_on_first_run())
        {
            ARM_COMPUTE_RETURN_ON_ERROR(cpu::CpuAdd::validate(&tmp_output_info, c, d, ConvertPolicy::SATURATE));
        }
    }

    // Validate matrix addition kernel
    if(beta != 0 && c != nullptr && !is_c_bias)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmMatrixAdditionKernel::validate(c, d, beta));
    }

    // Validate activation
    const ActivationLayerInfo &activation = gemm_info.activation_info();
    if(activation.enabled())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(cpu::CpuActivation::validate(d, nullptr, activation));
    }

    return Status{};
}

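// Execute the operator on the tensors provided in the pack (ACL_SRC_0 = A,
// ACL_SRC_1 = B, ACL_SRC_2 = C, ACL_DST = D). One-off work such as pre-transposing B
// is done lazily by prepare() on the first call.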
void CpuGemm::run(ITensorPack &tensors)
{
    prepare(tensors);

    auto a = tensors.get_const_tensor(ACL_SRC_0);
    auto b = tensors.get_const_tensor(ACL_SRC_1);
    auto c = tensors.get_const_tensor(ACL_SRC_2);
    auto d = tensors.get_tensor(ACL_DST);

    if(_asm_glue->is_configured())
    {
        // Pass c to asm dispatch only if it's the bias tensor
        ITensorPack asm_pack = tensors;
        asm_pack.add_const_tensor(ACL_SRC_2, (_reshape_b_only_on_first_run) ? c : nullptr);
        _asm_glue->run(asm_pack);
        if(_run_alpha_scale)
        {
            ITensorPack pack{ { ACL_SRC, d }, { ACL_DST, d } };
            _alpha_scale_func->run(pack);
        }
    }
    else
    {
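        // Bind the auxiliary tensors (interleaved A, transposed B, temporary D)
        // declared in workspace() to the memory provided through the tensor pack.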
        CpuAuxTensorHandler interleaved_a(offset_int_vec(InterleavedLHS), _tmp_a, tensors, true);
        CpuAuxTensorHandler transposed_b(offset_int_vec(TransposedRHS), _tmp_b, tensors, true);
        CpuAuxTensorHandler temp_d(offset_int_vec(TempResult), _tmp_d, tensors, true);

        ITensorPack mm_pack{ { ACL_SRC_0, a }, { ACL_SRC_1, b }, { ACL_DST, (_run_bias_addition) ? temp_d.get() : d } };
        if(!_run_vector_matrix_multiplication)
        {
            // Run interleave kernel
            ITensorPack interleave_pack{ { ACL_SRC, a }, { ACL_DST, interleaved_a.get() } };
            NEScheduler::get().schedule_op(_interleave_kernel.get(), Window::DimY, _interleave_kernel->window(), interleave_pack);

            if(!_reshape_b_only_on_first_run)
            {
                // Run transpose kernel
                ITensorPack transpose_pack{ { ACL_SRC, b }, { ACL_DST, transposed_b.get() } };
                NEScheduler::get().schedule_op(_transpose_kernel.get(), Window::DimY, _transpose_kernel->window(), transpose_pack);
            }

            // Use reshaped matrices
            mm_pack.add_const_tensor(ACL_SRC_0, interleaved_a.get());
            mm_pack.add_const_tensor(ACL_SRC_1, transposed_b.get());
        }

        NEScheduler::get().schedule_op(_mm_kernel.get(), _run_vector_matrix_multiplication ? Window::DimX : Window::DimY, _mm_kernel->window(), mm_pack);

        // Run bias addition kernel
        if(_run_bias_addition)
        {
            ITensorPack pack{ { ACL_SRC_0, temp_d.get() }, { ACL_SRC_1, c }, { ACL_DST, d } };
            _add_bias->run(pack);
        }
    }

    // Run matrix addition kernel
    if(_run_addition)
    {
        ITensorPack c_add_pack{ { ACL_SRC, c }, { ACL_DST, d } };
        NEScheduler::get().schedule_op(_ma_kernel.get(), Window::DimY, _ma_kernel->window(), c_add_pack);
    }

    // Run activation function
    if(_run_activation)
    {
        ITensorPack pack{ { ACL_SRC, d }, { ACL_DST, d } };
        _activation_func->run(pack);
    }
}

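// One-off preparation: either let the assembly dispatch pre-process its operands
// or, when B is reshaped only on the first run, transpose B once into its
// persistent auxiliary buffer so run() can skip that step.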
void CpuGemm::prepare(ITensorPack &tensors)
{
    if(!_is_prepared)
    {
        if(_asm_glue->is_configured())
        {
            _asm_glue->prepare(tensors);
        }
        else if(_reshape_b_only_on_first_run && !_run_vector_matrix_multiplication)
        {
            const ITensor *b     = tensors.get_const_tensor(ACL_SRC_1);
            ITensor       *b_aux = utils::cast::polymorphic_cast<ITensor *>(tensors.get_tensor(offset_int_vec(TransposedRHS)));
            ARM_COMPUTE_ERROR_ON_NULLPTR(b, b_aux);

            CpuAuxTensorHandler transposed_b(_tmp_b, *b_aux);
            ITensorPack         transpose_pack{ { ACL_SRC, b }, { ACL_DST, transposed_b.get() } };
            NEScheduler::get().schedule_op(_transpose_kernel.get(), Window::DimY, _transpose_kernel->window(), transpose_pack);
        }
        _is_prepared = true;
    }
}

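// Report the auxiliary memory required by this operator (assembly workspace and
// pre-transposed weights, or the interleave/transpose/temporary buffers of the
// reference path) so the caller can allocate it and pass it in via the tensor pack.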
experimental::MemoryRequirements CpuGemm::workspace() const
{
    return _aux_mem;
}
} // namespace cpu
} // namespace arm_compute