blob: a05258d20631b4381406f6b66c8e4f48243a02c5 [file] [log] [blame]
Michele Di Giorgio4dfc5532021-06-30 12:05:34 +01001/*
SiCong Li1b6377b2023-01-09 15:34:20 +00002 * Copyright (c) 2021-2023 Arm Limited.
Michele Di Giorgio4dfc5532021-06-30 12:05:34 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
SiCong Lic5ab4df2023-10-17 17:38:57 +010024#ifndef ACL_SRC_CPU_OPERATORS_CPUGEMM_H
25#define ACL_SRC_CPU_OPERATORS_CPUGEMM_H
Michele Di Giorgio4dfc5532021-06-30 12:05:34 +010026
Michele Di Giorgio4dfc5532021-06-30 12:05:34 +010027#include "arm_compute/core/ITensorPack.h"
28#include "arm_compute/core/TensorInfo.h"
29#include "arm_compute/core/Types.h"
SiCong Li91295492023-07-21 18:16:13 +010030#include "arm_compute/function_info/GEMMInfo.h"
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +010031
32#include "src/cpu/ICpuOperator.h"
Georgios Pinitas7891a732021-08-20 21:39:25 +010033#include "src/cpu/kernels/CpuGemmInterleave4x4Kernel.h"
34#include "src/cpu/kernels/CpuGemmMatrixAdditionKernel.h"
35#include "src/cpu/kernels/CpuGemmMatrixMultiplyKernel.h"
36#include "src/cpu/kernels/CpuGemmTranspose1xWKernel.h"
37#include "src/cpu/operators/CpuActivation.h"
38#include "src/cpu/operators/CpuAdd.h"
SiCong Lic5ab4df2023-10-17 17:38:57 +010039#include "src/cpu/operators/CpuTranspose.h"
Georgios Pinitas7891a732021-08-20 21:39:25 +010040#include "src/cpu/operators/internal/CpuGemmAssemblyDispatch.h"
Michele Di Giorgio4dfc5532021-06-30 12:05:34 +010041
42#include <memory>
43
44namespace arm_compute
45{
46namespace cpu
47{
/** Basic function to execute GEMM. This function calls the following kernels:
 *
 * If optimized assembly is available:
 *  -# @ref cpu::CpuGemmAssemblyDispatch
 *  -# @ref cpu::CpuActivation (if alpha != 1.0)
 * Else:
 *  -# @ref cpu::kernels::CpuGemmInterleave4x4Kernel (if the output tensor is a matrix)
 *  -# @ref cpu::kernels::CpuGemmTranspose1xWKernel (if the output tensor is a matrix)
 *  -# @ref cpu::kernels::CpuGemmMatrixMultiplyKernel
 * In both cases:
 *  -# @ref cpu::kernels::CpuGemmMatrixAdditionKernel (if c != nullptr and beta != 0.0 and is not reshaped once)
 * Else:
 *  -# @ref cpu::CpuAdd (if c != nullptr and is reshaped once and not optimized assembly in place)
 *
 *  -# @ref cpu::CpuActivation (if activation is specified in GEMMInfo)
 */
class CpuGemm : public ICpuOperator
{
public:
    /** Default constructor */
    CpuGemm() = default;
    /** Default destructor */
    ~CpuGemm() = default;
    /** Configure operator for a given list of arguments
     *
     * Valid data layouts:
     * - All
     *
     * Valid data type configurations:
     * |src0         |src1        |src2      |dst            |
     * |:------------|:-----------|:---------|:--------------|
     * |F32          |F32         |F32       |F32            |
     * |F16          |F16         |F16       |F16            |
     * |BFLOAT16     |BFLOAT16    |BFLOAT16  |FP32           |
     *
     * @note GEMM: General Matrix Multiply - [alpha * A * B + beta * C].
     * @note GEMM: The tensors a, b, c, d must have the same data type. You should not mix data types when calling this function.
     *
     * @note Batched GEMM only supports broadcasting cases where RHS rank < LHS rank but not the other way around
     *
     * @param[in]  a         First input tensor info (Matrix A or Vector A). Data type supported: BFLOAT16/F16/F32
     * @param[in]  b         Second input tensor info (Matrix B). Data type supported: same as @p a
     * @param[in]  c         Third input tensor info (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a
     * @param[out] d         Output tensor info. Data type supported: same as @p a
     * @param[in]  alpha     Weight of the matrix product
     * @param[in]  beta      Weight of matrix C
     * @param[in]  gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
     *                       if the reshape of matrix B should happen only for the first run
     */
    void configure(const ITensorInfo *a,
                   const ITensorInfo *b,
                   const ITensorInfo *c,
                   ITensorInfo       *d,
                   float              alpha,
                   float              beta,
                   const GEMMInfo    &gemm_info = GEMMInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CpuGemm.
     *
     * Similar to @ref CpuGemm::configure()
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *a,
                           const ITensorInfo *b,
                           const ITensorInfo *c,
                           const ITensorInfo *d,
                           float              alpha,
                           float              beta,
                           const GEMMInfo    &gemm_info = GEMMInfo());

    /** Indicates whether or not there is an optimal assembly implementation that can be used to process the given parameters.
     *
     * This method has the same use of @ref
     * NEGEMMConvolutionLayer::has_opt_impl, with the only caveat that
     * the value of arm_compute::WeightFormat need to be passed via the
     * parameter gemm_info.
     *
     * @param[out] weight_format Receives the weight format selected by the assembly backend.
     *
     * @return a status
     */
    static Status has_opt_impl(arm_compute::WeightFormat &weight_format,
                               const ITensorInfo         *a,
                               const ITensorInfo         *b,
                               const ITensorInfo         *c,
                               const ITensorInfo         *d,
                               const GEMMInfo            &gemm_info = GEMMInfo());

    // Inherited methods overridden:
    void                             run(ITensorPack &tensors) override;
    void                             prepare(ITensorPack &constants) override;
    experimental::MemoryRequirements workspace() const override;

    /** Indicates if the convolution executes in variable weights mode.
     *
     * When ACL executes convolution in variable weights mode, it does
     * not perform any processing of the weights tensor. Instead, it
     * utilizes the data as it is given by the user.
     */
    bool isVarWeightsKernel() const;

private:
    /** Indices of the auxiliary (workspace) tensors reported via @ref workspace(). */
    enum AuxTensorIdx
    {
        /* Slots 0 - 2 reserved for CpuGemmAssemblyDispatch */
        InterleavedLHS = 3, /**< LHS interleaved by CpuGemmInterleave4x4Kernel */
        PreTransposedRHS,   /**< RHS pre-transposed by CpuTranspose */
        Transposed1xWRHS,   /**< RHS reshaped by CpuGemmTranspose1xWKernel */
        TempResult,         /**< Intermediate result (e.g. before bias addition) */
        Count               /**< Total number of auxiliary tensor slots */
    };

    // Kernels/operators; each is only instantiated when the configuration requires it
    // (see the class-level description for when each path runs).
    std::unique_ptr<kernels::CpuGemmInterleave4x4Kernel>  _interleave_kernel{nullptr};
    std::unique_ptr<CpuTranspose>                         _pretranspose_b_func{nullptr};
    std::unique_ptr<kernels::CpuGemmTranspose1xWKernel>   _transpose1xW_b_kernel{nullptr};
    std::unique_ptr<kernels::CpuGemmMatrixMultiplyKernel> _mm_kernel{nullptr};
    std::unique_ptr<CpuGemmAssemblyDispatch>              _asm_glue{nullptr};
    std::unique_ptr<kernels::CpuGemmMatrixAdditionKernel> _ma_kernel{nullptr};
    std::unique_ptr<CpuActivation>                        _alpha_scale_func{nullptr};
    std::unique_ptr<CpuAdd>                               _add_bias{nullptr};
    std::unique_ptr<CpuActivation>                        _activation_func{nullptr};

    // TensorInfo descriptors for the auxiliary tensors listed in AuxTensorIdx.
    TensorInfo _tmp_a{};
    TensorInfo _pretransposed_b{};
    TensorInfo _tmp_b{};
    TensorInfo _tmp_d{};

    // Flags selecting which of the configured stages run() executes.
    bool _run_vector_matrix_multiplication{false};
    bool _run_interleave_transpose{
        true}; /**< If we run CpuGemmInterleave4x4Kernel on lhs and CpuGemmTranspose1xWKernel on rhs */
    bool _run_alpha_scale{false};
    bool _run_addition{false};
    bool _run_bias_addition{false};
    bool _run_activation{false};
    bool _reshape_b_only_on_first_run{false};
    bool _is_prepared{false}; // set by prepare(); guards one-time reshape work — TODO confirm against the .cpp

    experimental::MemoryRequirements _aux_mem{Count};
};
183} // namespace cpu
184} // namespace arm_compute
SiCong Lic5ab4df2023-10-17 17:38:57 +0100185#endif // ACL_SRC_CPU_OPERATORS_CPUGEMM_H