blob: 20f1139a0293d44af69b7b0dfac4d84c0643c83c [file] [log] [blame]
Moritz Pflanzer4dfc2352017-08-02 14:51:36 +01001/*
Renato Arantes36a75da2024-01-26 17:31:18 +00002 * Copyright (c) 2017-2021,2024 Arm Limited.
Moritz Pflanzer4dfc2352017-08-02 14:51:36 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#include "GEMM.h"
25
Michalis Spyroud1d77222020-04-08 14:10:15 +010026#include "arm_compute/core/Helpers.h"
Georgios Pinitas583137c2017-08-31 18:12:42 +010027#include "arm_compute/core/Types.h"
Moritz Pflanzer4dfc2352017-08-02 14:51:36 +010028
29namespace arm_compute
30{
31namespace test
32{
33namespace validation
34{
35namespace reference
36{
37template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
Renato Arantes36a75da2024-01-26 17:31:18 +000038SimpleTensor<T>
39gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
Moritz Pflanzer4dfc2352017-08-02 14:51:36 +010040{
41 // Create reference
Renato Arantes36a75da2024-01-26 17:31:18 +000042 SimpleTensor<T> dst{c.shape(), c.data_type(), 1};
Moritz Pflanzer4dfc2352017-08-02 14:51:36 +010043
44 // Compute reference
Gian Marco Iodice2213d4b2018-04-27 10:39:06 +010045 const int M = a.shape().y();
46 const int N = b.shape().x();
Moritz Pflanzer4dfc2352017-08-02 14:51:36 +010047 const int K = a.shape().x();
Gian Marco Iodice2213d4b2018-04-27 10:39:06 +010048 const int D = a.shape().z(); // Number of matrices in a batch
49 const int W = a.shape()[3]; // Number of batched-gemm (Winograd case)
Moritz Pflanzer4dfc2352017-08-02 14:51:36 +010050
Gian Marco Iodice2213d4b2018-04-27 10:39:06 +010051 const int a_stride_z = K * M;
52 const int a_stride_w = K * M * D;
53
Renato Arantes36a75da2024-01-26 17:31:18 +000054 const int b_stride_z =
55 b.shape().num_dimensions() > 2
56 ? N * K
57 : 0; // Do not slide the matrix B along the 3th dimension in case matrix B has less than 3 dimensions
58 int b_stride_w =
59 b.shape().num_dimensions() > 3
60 ? K * N * D
61 : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has less than 4 dimensions
Gian Marco Iodice37a46112021-08-04 15:22:28 +010062
63 // Note: There are 3 gemm types: batched-gemm, multi-gemm, and batched of multi-gemms. The third dimension of tensor b is overloaded when tensor b has exactly 3 dimensions:
64 // it can be either number of batches or multis. Batched-GEMM computation is detected only when the third dimension of "a" and "c" tensors is 1 and the number of dimensions is 4
Renato Arantes36a75da2024-01-26 17:31:18 +000065 const bool is_batched_gemm = b.shape().num_dimensions() == 3 && a.shape().num_dimensions() == 4 &&
66 c.shape().num_dimensions() == 4 && a.shape()[2] == 1 && c.shape()[2] == 1;
Gian Marco Iodice37a46112021-08-04 15:22:28 +010067
68 // Batched-GEMM
Renato Arantes36a75da2024-01-26 17:31:18 +000069 if (is_batched_gemm)
Gian Marco Iodice37a46112021-08-04 15:22:28 +010070 {
71 b_stride_w = b_stride_z;
72 }
Gian Marco Iodice2213d4b2018-04-27 10:39:06 +010073
74 const int c_stride_z = N * M;
75 const int c_stride_w = N * M * D;
76
Gian Marco Iodice37a46112021-08-04 15:22:28 +010077#if defined(_OPENMP) && !(defined(__arm__) && defined(__ANDROID__))
Michalis Spyroud1d77222020-04-08 14:10:15 +010078 #pragma omp parallel for collapse(2)
79#endif /* _OPENMP */
Renato Arantes36a75da2024-01-26 17:31:18 +000080 for (int w = 0; w < W; ++w)
Moritz Pflanzer4dfc2352017-08-02 14:51:36 +010081 {
Renato Arantes36a75da2024-01-26 17:31:18 +000082 for (int depth = 0; depth < D; ++depth)
Moritz Pflanzer4dfc2352017-08-02 14:51:36 +010083 {
Gian Marco Iodice2213d4b2018-04-27 10:39:06 +010084 const int base_addr_a = depth * a_stride_z + w * a_stride_w;
85 const int base_addr_b = depth * b_stride_z + w * b_stride_w;
86 const int base_addr_c = depth * c_stride_z + w * c_stride_w;
Moritz Pflanzer4dfc2352017-08-02 14:51:36 +010087
Renato Arantes36a75da2024-01-26 17:31:18 +000088 for (int row = 0; row < M; ++row)
Moritz Pflanzer4dfc2352017-08-02 14:51:36 +010089 {
Renato Arantes36a75da2024-01-26 17:31:18 +000090 for (int col = 0; col < N; ++col)
Gian Marco Iodice2213d4b2018-04-27 10:39:06 +010091 {
92 T acc(0);
Moritz Pflanzer4dfc2352017-08-02 14:51:36 +010093
Renato Arantes36a75da2024-01-26 17:31:18 +000094 for (int k = 0; k < K; ++k)
Gian Marco Iodice2213d4b2018-04-27 10:39:06 +010095 {
96 acc += a[base_addr_a + k + row * K] * b[base_addr_b + col + k * N];
97 }
98
99 // Finalize the result: alpha * A * B + beta * C
100 dst[base_addr_c + col + row * N] = alpha * acc + beta * c[base_addr_c + col + row * N];
101 }
102 }
Moritz Pflanzer4dfc2352017-08-02 14:51:36 +0100103 }
104 }
105
106 return dst;
107}
108
Gian Marco Iodice0c17aa22019-09-27 09:23:15 +0100109template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
Renato Arantes36a75da2024-01-26 17:31:18 +0000110SimpleTensor<T> gemm_mixed_precision(
111 const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
Gian Marco Iodice0c17aa22019-09-27 09:23:15 +0100112{
113 // GEMM mixed-precision combines F32 accumulators with F16 multiplications
114 // Create reference
Renato Arantes36a75da2024-01-26 17:31:18 +0000115 SimpleTensor<T> dst{c.shape(), c.data_type(), 1};
Gian Marco Iodice0c17aa22019-09-27 09:23:15 +0100116
117 // Compute reference
118 const int M = a.shape().y();
119 const int N = b.shape().x();
120 const int K = a.shape().x();
121 const int D = a.shape().z(); // Number of matrices in a batch
122 const int W = a.shape()[3]; // Number of batched-gemm (Winograd case)
123
124 const int a_stride_z = K * M;
125 const int a_stride_w = K * M * D;
126
Renato Arantes36a75da2024-01-26 17:31:18 +0000127 const int b_stride_z =
128 b.shape().num_dimensions() > 2
129 ? N * K
130 : 0; // Do not slide the matrix B along the 3th dimension in case matrix B has less than 3 dimensions
131 int b_stride_w =
132 b.shape().num_dimensions() > 3
133 ? K * N * D
134 : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has less than 4 dimensions
Gian Marco Iodice37a46112021-08-04 15:22:28 +0100135
136 // Note: There are 3 gemm types: batched-gemm, multi-gemm, and batched of multi-gemms. The third dimension of tensor b is overloaded when tensor b has exactly 3 dimensions:
137 // it can be either number of batches or multis. Batched-GEMM computation is detected only when the third dimension of "a" and "c" tensors is 1 and the number of dimensions is 4
Renato Arantes36a75da2024-01-26 17:31:18 +0000138 const bool is_batched_gemm = b.shape().num_dimensions() == 3 && a.shape().num_dimensions() == 4 &&
139 c.shape().num_dimensions() == 4 && a.shape()[2] == 1 && c.shape()[2] == 1;
Gian Marco Iodice37a46112021-08-04 15:22:28 +0100140
141 // Batched-GEMM
Renato Arantes36a75da2024-01-26 17:31:18 +0000142 if (is_batched_gemm)
Gian Marco Iodice37a46112021-08-04 15:22:28 +0100143 {
144 b_stride_w = b_stride_z;
145 }
Gian Marco Iodice0c17aa22019-09-27 09:23:15 +0100146
147 const int c_stride_z = N * M;
148 const int c_stride_w = N * M * D;
149
Gian Marco Iodice37a46112021-08-04 15:22:28 +0100150#if defined(_OPENMP) && !(defined(__arm__) && defined(__ANDROID__))
Michalis Spyroud1d77222020-04-08 14:10:15 +0100151 #pragma omp parallel for collapse(2)
152#endif /* _OPENMP */
Renato Arantes36a75da2024-01-26 17:31:18 +0000153 for (int w = 0; w < W; ++w)
Gian Marco Iodice0c17aa22019-09-27 09:23:15 +0100154 {
Renato Arantes36a75da2024-01-26 17:31:18 +0000155 for (int depth = 0; depth < D; ++depth)
Gian Marco Iodice0c17aa22019-09-27 09:23:15 +0100156 {
157 const int base_addr_a = depth * a_stride_z + w * a_stride_w;
158 const int base_addr_b = depth * b_stride_z + w * b_stride_w;
159 const int base_addr_c = depth * c_stride_z + w * c_stride_w;
160
Renato Arantes36a75da2024-01-26 17:31:18 +0000161 for (int row = 0; row < M; ++row)
Gian Marco Iodice0c17aa22019-09-27 09:23:15 +0100162 {
Renato Arantes36a75da2024-01-26 17:31:18 +0000163 for (int col = 0; col < N; ++col)
Gian Marco Iodice0c17aa22019-09-27 09:23:15 +0100164 {
165 float acc(0);
166
Renato Arantes36a75da2024-01-26 17:31:18 +0000167 for (int k = 0; k < K; ++k)
Gian Marco Iodice0c17aa22019-09-27 09:23:15 +0100168 {
169 acc += static_cast<float>(a[base_addr_a + k + row * K] * b[base_addr_b + col + k * N]);
170 }
171
172 // Finalize the result: alpha * A * B + beta * C
Renato Arantes36a75da2024-01-26 17:31:18 +0000173 dst[base_addr_c + col + row * N] =
174 static_cast<T>(alpha * acc + beta * c[base_addr_c + col + row * N]);
Gian Marco Iodice0c17aa22019-09-27 09:23:15 +0100175 }
176 }
177 }
178 }
179
180 return dst;
181}
182
// Explicit instantiations for the data types exercised by the validation suite:
// plain gemm for float, bfloat16 and half; the mixed-precision variant only for half.
template SimpleTensor<float>
gemm(const SimpleTensor<float> &a, const SimpleTensor<float> &b, const SimpleTensor<float> &c, float alpha, float beta);
template SimpleTensor<bfloat16> gemm(const SimpleTensor<bfloat16> &a,
                                     const SimpleTensor<bfloat16> &b,
                                     const SimpleTensor<bfloat16> &c,
                                     float alpha,
                                     float beta);
template SimpleTensor<half>
gemm(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
template SimpleTensor<half> gemm_mixed_precision(
    const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
Moritz Pflanzer4dfc2352017-08-02 14:51:36 +0100194} // namespace reference
195} // namespace validation
196} // namespace test
197} // namespace arm_compute