/*
 * Copyright (c) 2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/cpu/operators/CpuGemm.h"

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/common/utils/Log.h"
#include "src/core/CPP/Validate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/MemoryHelpers.h"
#include "src/cpu/utils/CpuAuxTensorHandler.h"

using namespace arm_compute::experimental;
using namespace arm_compute::misc::shape_calculator;

namespace arm_compute
{
namespace cpu
{
namespace
{
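// Translate the public GEMMInfo into the metadata consumed by the assembly dispatch:
// convolution method, 3D reinterpretation of input/output, fused activation and the
// fast-math hint.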
cpu::AsmGemmInfo init_assembly_metadata(const GEMMInfo &info)
{
    cpu::AsmGemmInfo asm_info;
    asm_info.method                  = cpu::AsmConvMethod::Im2Col;
    asm_info.reinterpret_input_as_3d = info.reinterpret_input_as_3d();
    asm_info.depth_output_gemm3d     = info.depth_output_gemm3d();
    asm_info.activation_info         = info.activation_info();
    asm_info.fast_mode               = info.fast_math();

    return asm_info;
}
} // namespace

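// CpuGemm computes d = alpha * (a * b) + beta * c and is configured along one of two paths:
// - the optimized assembly path (CpuGemmAssemblyDispatch), where the alpha scale is applied
//   afterwards as a LINEAR activation and a standalone activation stage runs only when the
//   backend cannot fuse the requested one; or
// - the generic path, which interleaves a, transposes b, runs CpuGemmMatrixMultiplyKernel,
//   and then appends bias addition, matrix addition and activation stages as required.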
void CpuGemm::configure(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, ITensorInfo *d, float alpha, float beta, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, d);
    ARM_COMPUTE_ERROR_THROW_ON(CpuGemm::validate(a, b, c, d, alpha, beta, gemm_info));
    ARM_COMPUTE_LOG_PARAMS(a, b, c, d, alpha, beta, gemm_info);

    const cpu::AsmGemmInfo asm_info = init_assembly_metadata(gemm_info);
    // reshape_b_only_on_first_run() also flags that c is a bias vector rather than a beta-scaled addend
    const bool is_c_bias     = gemm_info.reshape_b_only_on_first_run();
    const bool run_optimised = bool(cpu::CpuGemmAssemblyDispatch::validate(a, b, is_c_bias ? c : nullptr, d, asm_info));

    _is_prepared                      = false;
    _reshape_b_only_on_first_run      = gemm_info.reshape_b_only_on_first_run();
    _run_vector_matrix_multiplication = a->dimension(1) < 2; // a is a single row: run GEMV instead of GEMM
    _run_alpha_scale                  = alpha != 1.f;
    _run_bias_addition                = c != nullptr && is_c_bias;
    _run_addition                     = beta != 0 && c != nullptr && !is_c_bias;
    // Run a standalone activation stage unless the assembly path is taken and can fuse the activation
    _run_activation = gemm_info.activation_info().enabled() && (!run_optimised || !cpu::CpuGemmAssemblyDispatch::is_activation_supported(gemm_info.activation_info()));

    if(run_optimised)
    {
        const ITensorInfo *c_to_use = is_c_bias ? c : nullptr;
        _asm_glue                   = std::make_unique<cpu::CpuGemmAssemblyDispatch>();
        _asm_glue->configure(a, b, c_to_use, d, asm_info);
        ARM_COMPUTE_ERROR_ON(!_asm_glue->is_configured());

        auto asm_mem_req           = _asm_glue->workspace();
        _aux_mem[AsmGemmWorkspace] = asm_mem_req[AsmGemmWorkspace];
        _aux_mem[Pretraspose]      = asm_mem_req[Pretraspose];
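        // Forward the assembly backend's scratch requirements so they are exposed to the
        // caller through this operator's own workspace().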

        // Scale product by alpha
        if(_run_alpha_scale)
        {
            _alpha_scale_func = std::make_unique<cpu::CpuActivation>();
            _alpha_scale_func->configure(d, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, alpha, 0.f));
        }
    }
    else
    {
        // Pick output tensor in case bias addition should be performed
        ITensorInfo *gemm_output_to_use = (_run_bias_addition) ? &_tmp_d : d;

        _mm_kernel = std::make_unique<cpu::kernels::CpuGemmMatrixMultiplyKernel>();

        // Select between GEMV and GEMM
        if(_run_vector_matrix_multiplication)
        {
            // Configure the matrix multiply kernel
            _mm_kernel->configure(a, b, gemm_output_to_use, alpha, false);
        }
        else
        {
            const int m = a->dimension(1);
            const int n = b->dimension(0);
            const int k = a->dimension(0);

            // Configure interleave kernel
            _interleave_kernel = std::make_unique<cpu::kernels::CpuGemmInterleave4x4Kernel>();
            _interleave_kernel->configure(a, &_tmp_a);
            _aux_mem[InterleavedLHS] = MemoryInfo(offset_int_vec(InterleavedLHS), MemoryLifetime::Temporary, _tmp_a.total_size());

            // Configure transpose kernel
            _transpose_kernel = std::make_unique<cpu::kernels::CpuGemmTranspose1xWKernel>();
            _transpose_kernel->configure(b, &_tmp_b);
            _aux_mem[TransposedRHS] = MemoryInfo(offset_int_vec(TransposedRHS), MemoryLifetime::Persistent, _tmp_b.total_size());
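            // The reshaped RHS is Persistent so that, when b is reshaped only on the first
            // run, prepare() can fill it once and later runs can reuse it.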

            // Configure matrix multiplication kernel
            _mm_kernel->configure(&_tmp_a, &_tmp_b, gemm_output_to_use, alpha, true, GEMMReshapeInfo(m, n, k));
        }

        if(_run_bias_addition)
        {
            _add_bias = std::make_unique<cpu::CpuAdd>();
            _add_bias->configure(gemm_output_to_use, c, d, ConvertPolicy::SATURATE);
            _aux_mem[TempResult] = MemoryInfo(offset_int_vec(TempResult), MemoryLifetime::Temporary, _tmp_d.total_size());
        }
    }

    // Configure matrix addition kernel
    if(_run_addition)
    {
        _ma_kernel = std::make_unique<cpu::kernels::CpuGemmMatrixAdditionKernel>();
        _ma_kernel->configure(c, d, beta);
    }

    // Configure activation
    if(_run_activation)
    {
        _activation_func = std::make_unique<cpu::CpuActivation>();
        _activation_func->configure(d, nullptr, gemm_info.activation_info());
    }
}
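
// A minimal usage sketch (assumptions: F32 tensors, default GEMMInfo, no bias; tensor and
// workspace allocation elided; the names a, b, d and the sizes M, N, K are illustrative,
// not part of this file):
//
//   TensorInfo a_info(TensorShape(K, M), 1, DataType::F32); // a is M x K
//   TensorInfo b_info(TensorShape(N, K), 1, DataType::F32); // b is K x N
//   TensorInfo d_info(TensorShape(N, M), 1, DataType::F32); // d is M x N
//
//   CpuGemm gemm;
//   gemm.configure(&a_info, &b_info, nullptr, &d_info, 1.f, 0.f, GEMMInfo());
//
//   // a, b and d are allocated ITensors matching the infos above
//   ITensorPack pack{ { ACL_SRC_0, &a }, { ACL_SRC_1, &b }, { ACL_DST, &d } };
//   gemm.run(pack);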

Status CpuGemm::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *d, float alpha, float beta, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_UNUSED(alpha);
    // As in configure(), reshape_b_only_on_first_run() also flags that c is a bias vector
    const bool is_c_bias = gemm_info.reshape_b_only_on_first_run();

    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(a);
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(a);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::BFLOAT16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->dimension(0) != b->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported");
    if(a->data_type() != DataType::BFLOAT16)
    {
        // BFLOAT16 inputs accumulate into an F32 output, so a and d may legitimately differ in type
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, d);
    }

    if(c != nullptr && !is_c_bias)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(gemm_info.depth_output_gemm3d() != 0);
        ARM_COMPUTE_RETURN_ERROR_ON(gemm_info.reinterpret_input_as_3d());
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(c, d);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->dimension(1) != c->dimension(1), "The C matrix must have the same number of rows as the matrix A");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(b->dimension(0) != c->dimension(0), "The C matrix must have the same number of columns as the matrix B");
    }

    if(d->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(b->dimension(0) != d->dimension(0));
        if(gemm_info.depth_output_gemm3d() != 0)
        {
            if(gemm_info.reinterpret_input_as_3d())
            {
                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != d->dimension(1));
                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(2) != d->dimension(2));
            }
            else
            {
                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != d->dimension(1) * d->dimension(2));
            }
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != d->dimension(1));
        }
    }

    // Check if we need to run the optimized assembly kernel
    cpu::AsmGemmInfo asm_info      = init_assembly_metadata(gemm_info);
    const bool       run_optimised = bool(cpu::CpuGemmAssemblyDispatch::validate(a, b, is_c_bias ? c : nullptr, d, asm_info));
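    // If the assembly backend accepts the problem, the reshape and kernel checks below
    // (which apply only to the generic path) are skipped.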

    if(!run_optimised)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.reinterpret_input_as_3d(), "CpuGemm cannot reinterpret the input tensor as 3D");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.depth_output_gemm3d() != 0, "CpuGemm cannot reinterpret the output tensor as 3D");

        // Check if the first input tensor is a vector.
        const bool run_vector_matrix_multiplication = a->dimension(1) < 2;
        // Check if we need to reshape the matrix A and matrix B
        const bool run_interleave_transpose = !run_vector_matrix_multiplication && !(gemm_info.reshape_b_only_on_first_run());

        // Arguments used by GEMMReshapeInfo
        // If we pass the matrix A and matrix B reshaped to CpuGemmMatrixMultiplyKernel, we need to pass m, n, k, mult_transpose1xW_width and mult_interleave4x4_height to GEMMReshapeInfo
        // in order to know how the matrices have been reshaped
        const int m                         = a->dimension(1);
        const int n                         = b->dimension(0);
        const int k                         = a->dimension(0);
        int       mult_transpose1xW_width   = 1;
        int       mult_interleave4x4_height = 1;

        const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(m, n, k, mult_transpose1xW_width, mult_interleave4x4_height, gemm_info.depth_output_gemm3d());

        const ITensorInfo *matrix_a_info = a;
        const ITensorInfo *matrix_b_info = b;

        TensorInfo tmp_a_info{};
        TensorInfo tmp_b_info{};
        TensorInfo tmp_output_info = *d->clone();

        if(run_interleave_transpose)
        {
            matrix_a_info = &tmp_a_info;
            matrix_b_info = &tmp_b_info;

            // Validate interleave kernel
            auto_init_if_empty(tmp_a_info, a->clone()->set_tensor_shape(compute_interleaved_shape(*a, mult_interleave4x4_height, gemm_info.reinterpret_input_as_3d())));
            ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmInterleave4x4Kernel::validate(a, &tmp_a_info));

            // Validate transpose kernel
            auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(compute_transpose1xW_with_element_size_shape(*b, mult_transpose1xW_width)));
            ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmTranspose1xWKernel::validate(b, &tmp_b_info));
        }

        // Validate matrix multiply
        auto_init_if_empty(tmp_output_info, matrix_a_info->clone()->set_tensor_shape(compute_mm_shape(*matrix_a_info, *matrix_b_info, run_interleave_transpose, reshape_info)));
        ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, &tmp_output_info, alpha, run_interleave_transpose, reshape_info));

        if(c != nullptr && is_c_bias)
        {
            ARM_COMPUTE_RETURN_ON_ERROR(cpu::CpuAdd::validate(&tmp_output_info, c, d, ConvertPolicy::SATURATE));
        }
    }

    // Validate matrix addition kernel
    if(beta != 0 && c != nullptr && !is_c_bias)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmMatrixAdditionKernel::validate(c, d, beta));
    }

    // Validate activation
    const ActivationLayerInfo &activation = gemm_info.activation_info();
    if(activation.enabled())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(cpu::CpuActivation::validate(d, nullptr, activation));
    }

    return Status{};
}
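
// Typical call site (illustrative, not from this file): Status converts to true on success,
// as used with CpuGemmAssemblyDispatch::validate above, so configuration can be gated on it:
//
//   if(bool(CpuGemm::validate(&a_info, &b_info, nullptr, &d_info, alpha, beta, gemm_info)))
//   {
//       gemm.configure(&a_info, &b_info, nullptr, &d_info, alpha, beta, gemm_info);
//   }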

void CpuGemm::run(ITensorPack &tensors)
{
    prepare(tensors);

    auto a = tensors.get_const_tensor(ACL_SRC_0);
    auto b = tensors.get_const_tensor(ACL_SRC_1);
    auto c = tensors.get_const_tensor(ACL_SRC_2);
    auto d = tensors.get_tensor(ACL_DST);

    // _asm_glue is only created on the assembly path, so guard before dereferencing
    if(_asm_glue != nullptr && _asm_glue->is_configured())
    {
        // Pass c to asm dispatch only if it's the bias tensor
        ITensorPack asm_pack = tensors;
        asm_pack.add_const_tensor(ACL_SRC_2, (_reshape_b_only_on_first_run) ? c : nullptr);
        _asm_glue->run(asm_pack);
        if(_run_alpha_scale)
        {
            ITensorPack pack{ { ACL_SRC, d }, { ACL_DST, d } };
            _alpha_scale_func->run(pack);
        }
    }
    else
    {
        CpuAuxTensorHandler interleaved_a(offset_int_vec(InterleavedLHS), _tmp_a, tensors, true);
        CpuAuxTensorHandler transposed_b(offset_int_vec(TransposedRHS), _tmp_b, tensors, true);
        CpuAuxTensorHandler temp_d(offset_int_vec(TempResult), _tmp_d, tensors, true);

        ITensorPack mm_pack{ { ACL_SRC_0, a }, { ACL_SRC_1, b }, { ACL_DST, (_run_bias_addition) ? temp_d.get() : d } };
        if(!_run_vector_matrix_multiplication)
        {
            // Run interleave kernel
            ITensorPack interleave_pack{ { ACL_SRC, a }, { ACL_DST, interleaved_a.get() } };
            NEScheduler::get().schedule_op(_interleave_kernel.get(), Window::DimY, _interleave_kernel->window(), interleave_pack);

            if(!_reshape_b_only_on_first_run)
            {
                // Run transpose kernel
                ITensorPack transpose_pack{ { ACL_SRC, b }, { ACL_DST, transposed_b.get() } };
                NEScheduler::get().schedule_op(_transpose_kernel.get(), Window::DimY, _transpose_kernel->window(), transpose_pack);
            }

            // Use reshaped matrices
            mm_pack.add_const_tensor(ACL_SRC_0, interleaved_a.get());
            mm_pack.add_const_tensor(ACL_SRC_1, transposed_b.get());
        }

        NEScheduler::get().schedule_op(_mm_kernel.get(), _run_vector_matrix_multiplication ? Window::DimX : Window::DimY, _mm_kernel->window(), mm_pack);

        // Run bias addition kernel
        if(_run_bias_addition)
        {
            ITensorPack pack{ { ACL_SRC_0, temp_d.get() }, { ACL_SRC_1, c }, { ACL_DST, d } };
            _add_bias->run(pack);
        }
    }

    // Run matrix addition kernel
    if(_run_addition)
    {
        ITensorPack c_add_pack{ { ACL_SRC, c }, { ACL_DST, d } };
        NEScheduler::get().schedule_op(_ma_kernel.get(), Window::DimY, _ma_kernel->window(), c_add_pack);
    }

    // Run activation function
    if(_run_activation)
    {
        ITensorPack pack{ { ACL_SRC, d }, { ACL_DST, d } };
        _activation_func->run(pack);
    }
}

void CpuGemm::prepare(ITensorPack &tensors)
{
    if(!_is_prepared)
    {
        if(_asm_glue != nullptr && _asm_glue->is_configured())
        {
            _asm_glue->prepare(tensors);
        }
        else if(_reshape_b_only_on_first_run && !_run_vector_matrix_multiplication)
        {
            // Reshape b once up front so run() can skip the transpose on every call
            const ITensor *b     = tensors.get_const_tensor(ACL_SRC_1);
            ITensor       *b_aux = utils::cast::polymorphic_cast<ITensor *>(tensors.get_tensor(offset_int_vec(TransposedRHS)));
            ARM_COMPUTE_ERROR_ON_NULLPTR(b, b_aux);

            CpuAuxTensorHandler transposed_b(_tmp_b, *b_aux);
            ITensorPack         transpose_pack{ { ACL_SRC, b }, { ACL_DST, transposed_b.get() } };
            NEScheduler::get().schedule_op(_transpose_kernel.get(), Window::DimY, _transpose_kernel->window(), transpose_pack);
        }
        _is_prepared = true;
    }
}

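// The requirements below describe the auxiliary tensors (assembly scratch, interleaved LHS,
// transposed RHS, temporary result) that the caller allocates and binds into the ITensorPack
// under the offset_int_vec slot ids used above.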
experimental::MemoryRequirements CpuGemm::workspace() const
{
    return _aux_mem;
}
} // namespace cpu
} // namespace arm_compute