/*
 * Copyright (c) 2017-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "gemm_helpers.h"
#include "helpers_asymm.h"
#include "repeat.h"
#include "tile_helpers.h"

#if defined(DATA_TYPE) && defined(ACC_DATA_TYPE)

#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#define ARM_DOT(x, y, val) val = arm_dot_acc((x), (y), (val));
#else // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#define ARM_DOT(x, y, val) val += arm_dot((x), (y));
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
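// Note (illustrative, names are placeholders): ARM_DOT(x, y, val) accumulates the 4-way dot product
// of two VEC_DATA_TYPE(DATA_TYPE, 4) vectors "x" and "y" into the scalar accumulator "val". With the
// accumulating extension it maps to a single arm_dot_acc() call; otherwise it falls back to
// arm_dot() followed by an add, roughly: val += x.s0 * y.s0 + x.s1 * y.s1 + x.s2 * y.s2 + x.s3 * y.s3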

#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)

#define ARM_DOT1(a, b, c) \
    ({                    \
        ARM_DOT((VEC_DATA_TYPE(DATA_TYPE, 4))(a, (VEC_DATA_TYPE(DATA_TYPE, 3))0), (VEC_DATA_TYPE(DATA_TYPE, 4))(b, (VEC_DATA_TYPE(DATA_TYPE, 3))0), c); \
    })
#define ARM_DOT2(a, b, c) \
    ({                    \
        ARM_DOT((VEC_DATA_TYPE(DATA_TYPE, 4))(a, (VEC_DATA_TYPE(DATA_TYPE, 2))0), (VEC_DATA_TYPE(DATA_TYPE, 4))(b, (VEC_DATA_TYPE(DATA_TYPE, 2))0), c); \
    })
#define ARM_DOT3(a, b, c) \
    ({                    \
        ARM_DOT((VEC_DATA_TYPE(DATA_TYPE, 4))(a, (DATA_TYPE)0), (VEC_DATA_TYPE(DATA_TYPE, 4))(b, (DATA_TYPE)0), c); \
    })
#define ARM_DOT4(a, b, c) \
    ({                    \
        ARM_DOT(a, b, c); \
    })
#define ARM_DOT8(a, b, c)            \
    ({                               \
        ARM_DOT4((a.lo), (b.lo), c); \
        ARM_DOT4((a.hi), (b.hi), c); \
    })
#define ARM_DOT16(a, b, c)           \
    ({                               \
        ARM_DOT8((a.lo), (b.lo), c); \
        ARM_DOT8((a.hi), (b.hi), c); \
    })

#else // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)

/** Specialized macros to perform the dot product instruction between two vectors of size K0 [1,16] without using the dot8 instruction. */
#define ARM_DOT1(a, b, c)          \
    ({                             \
        c += (ACC_DATA_TYPE)a * b; \
    })
#define ARM_DOT2(a, b, c)                \
    ({                                   \
        c += (ACC_DATA_TYPE)a.s0 * b.s0; \
        c += (ACC_DATA_TYPE)a.s1 * b.s1; \
    })
#define ARM_DOT3(a, b, c)                \
    ({                                   \
        ARM_DOT2(a, b, c);               \
        c += (ACC_DATA_TYPE)a.s2 * b.s2; \
    })
#define ARM_DOT4(a, b, c)                \
    ({                                   \
        ARM_DOT3(a, b, c);               \
        c += (ACC_DATA_TYPE)a.s3 * b.s3; \
    })
#define ARM_DOT8(a, b, c)            \
    ({                               \
        ARM_DOT4((a.lo), (b.lo), c); \
        ARM_DOT4((a.hi), (b.hi), c); \
    })
#define ARM_DOT16(a, b, c)           \
    ({                               \
        ARM_DOT8((a.lo), (b.lo), c); \
        ARM_DOT8((a.hi), (b.hi), c); \
    })
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
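// Illustrative expansion (not compiled; a0/b0/c0 are placeholder names): ARM_DOT4(a0, b0, c0.s0)
// accumulates the dot product of two 4-element vectors into a single scalar lane. Depending on which
// branch of the #if above was taken, this becomes either one dot8-based accumulation or the four
// scalar multiply-accumulates c0.s0 += a0.s0 * b0.s0 + ... + a0.s3 * b0.s3. ARM_DOT8/ARM_DOT16
// simply split their operands into .lo/.hi halves and recurse down to ARM_DOT4.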

/** Specialized macros to perform a broadcast dot product operation between one vector "a" and N0 vectors "b" of size K0 [1,16] */
#define ARM_DOT_K0X1(k0, a, b, c)         \
    ({                                    \
        ARM_DOT_K0(k0, (a), (b##0), (c)); \
    })
#define ARM_DOT_K0X2(k0, a, b, c)            \
    ({                                       \
        ARM_DOT_K0(k0, (a), (b##0), (c.s0)); \
        ARM_DOT_K0(k0, (a), (b##1), (c.s1)); \
    })
#define ARM_DOT_K0X3(k0, a, b, c)            \
    ({                                       \
        ARM_DOT_K0X2(k0, a, b, c);           \
        ARM_DOT_K0(k0, (a), (b##2), (c.s2)); \
    })
#define ARM_DOT_K0X4(k0, a, b, c)            \
    ({                                       \
        ARM_DOT_K0X3(k0, a, b, c);           \
        ARM_DOT_K0(k0, (a), (b##3), (c.s3)); \
    })
#define ARM_DOT_K0X8(k0, a, b, c)            \
    ({                                       \
        ARM_DOT_K0X4(k0, a, b, c);           \
        ARM_DOT_K0(k0, (a), (b##4), (c.s4)); \
        ARM_DOT_K0(k0, (a), (b##5), (c.s5)); \
        ARM_DOT_K0(k0, (a), (b##6), (c.s6)); \
        ARM_DOT_K0(k0, (a), (b##7), (c.s7)); \
    })
#define ARM_DOT_K0X16(k0, a, b, c)           \
    ({                                       \
        ARM_DOT_K0X8(k0, a, b, c);           \
        ARM_DOT_K0(k0, (a), (b##8), (c.s8)); \
        ARM_DOT_K0(k0, (a), (b##9), (c.s9)); \
        ARM_DOT_K0(k0, (a), (b##A), (c.sA)); \
        ARM_DOT_K0(k0, (a), (b##B), (c.sB)); \
        ARM_DOT_K0(k0, (a), (b##C), (c.sC)); \
        ARM_DOT_K0(k0, (a), (b##D), (c.sD)); \
        ARM_DOT_K0(k0, (a), (b##E), (c.sE)); \
        ARM_DOT_K0(k0, (a), (b##F), (c.sF)); \
    })
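// Illustrative expansion (not compiled; a0/b/c0 are placeholder names): ARM_DOT_K0X4(8, a0, b, c0)
// computes a fragment of one output row spanning 4 columns: c0.s0..c0.s3 each accumulate the
// 8-element dot product between the LHS row fragment a0 and the RHS row fragments b0..b3 (the "b"
// argument is a name prefix expanded via token pasting, so b##0 becomes b0, b##1 becomes b1, ...).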

/** Specialized macros to perform a partial matrix multiplication with dimensions M0,N0,K0 */
#define ARM_MM_K0XN0X1(n0, k0, a, b, c)           \
    ({                                            \
        ARM_DOT_K0XN0(n0, k0, (a##0), b, (c##0)); \
    })
#define ARM_MM_K0XN0X2(n0, k0, a, b, c)           \
    ({                                            \
        ARM_MM_K0XN0X1(n0, k0, a, b, c);          \
        ARM_DOT_K0XN0(n0, k0, (a##1), b, (c##1)); \
    })
#define ARM_MM_K0XN0X3(n0, k0, a, b, c)           \
    ({                                            \
        ARM_MM_K0XN0X2(n0, k0, a, b, c);          \
        ARM_DOT_K0XN0(n0, k0, (a##2), b, (c##2)); \
    })
#define ARM_MM_K0XN0X4(n0, k0, a, b, c)           \
    ({                                            \
        ARM_MM_K0XN0X3(n0, k0, a, b, c);          \
        ARM_DOT_K0XN0(n0, k0, (a##3), b, (c##3)); \
    })
#define ARM_MM_K0XN0X5(n0, k0, a, b, c)           \
    ({                                            \
        ARM_MM_K0XN0X4(n0, k0, a, b, c);          \
        ARM_DOT_K0XN0(n0, k0, (a##4), b, (c##4)); \
    })
#define ARM_MM_K0XN0X6(n0, k0, a, b, c)           \
    ({                                            \
        ARM_MM_K0XN0X5(n0, k0, a, b, c);          \
        ARM_DOT_K0XN0(n0, k0, (a##5), b, (c##5)); \
    })
#define ARM_MM_K0XN0X7(n0, k0, a, b, c)           \
    ({                                            \
        ARM_MM_K0XN0X6(n0, k0, a, b, c);          \
        ARM_DOT_K0XN0(n0, k0, (a##6), b, (c##6)); \
    })
#define ARM_MM_K0XN0X8(n0, k0, a, b, c)           \
    ({                                            \
        ARM_MM_K0XN0X7(n0, k0, a, b, c);          \
        ARM_DOT_K0XN0(n0, k0, (a##7), b, (c##7)); \
    })

#define ARM_DOT_K0(k0, a, b, c) \
    ({                          \
        CONCAT(ARM_DOT, k0)     \
        ((a), (b), (c));        \
    })

#define ARM_DOT_K0XN0(n0, k0, a, b, c) \
    ({                                 \
        CONCAT(ARM_DOT_K0X, n0)        \
        (k0, (a), b, (c));             \
    })

#define ARM_MM_K0XN0XM0(m0, n0, k0, a, b, c) \
    ({                                       \
        CONCAT(ARM_MM_K0XN0X, m0)            \
        (n0, k0, a, b, c);                   \
    })
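// Illustrative expansion (not compiled; a/b/c are placeholder name prefixes): ARM_MM_K0XN0XM0(4, 8, 16, a, b, c)
// pastes m0 = 4 onto the macro name, giving ARM_MM_K0XN0X4(8, 16, a, b, c). That in turn issues one
// ARM_DOT_K0XN0(8, 16, aI, b, cI) per LHS row I in [0, 3], so the accumulators c0..c3 (each an
// 8-wide vector) end up holding a 4x8 output tile of 16-deep dot products.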

/** Specialized macros to perform a broadcast multiply-accumulate between the K0 [1,16] scalar elements of one vector "a" and K0 vectors "b" of size N0 */
#define ARM_MUL_N0X1(VECTOR_ACC_TYPE, a, b, c)    \
    ({                                            \
        c += CONVERT(b##0, VECTOR_ACC_TYPE) * a;  \
    })
#define ARM_MUL_N0X2(VECTOR_ACC_TYPE, a, b, c)        \
    ({                                                \
        c += CONVERT(b##0, VECTOR_ACC_TYPE) * a.s##0; \
        c += CONVERT(b##1, VECTOR_ACC_TYPE) * a.s##1; \
    })
#define ARM_MUL_N0X3(VECTOR_ACC_TYPE, a, b, c)        \
    ({                                                \
        ARM_MUL_N0X2(VECTOR_ACC_TYPE, a, b, c);       \
        c += CONVERT(b##2, VECTOR_ACC_TYPE) * a.s##2; \
    })
#define ARM_MUL_N0X4(VECTOR_ACC_TYPE, a, b, c)        \
    ({                                                \
        ARM_MUL_N0X3(VECTOR_ACC_TYPE, a, b, c);       \
        c += CONVERT(b##3, VECTOR_ACC_TYPE) * a.s##3; \
    })
#define ARM_MUL_N0X8(VECTOR_ACC_TYPE, a, b, c)        \
    ({                                                \
        ARM_MUL_N0X4(VECTOR_ACC_TYPE, a, b, c);       \
        c += CONVERT(b##4, VECTOR_ACC_TYPE) * a.s##4; \
        c += CONVERT(b##5, VECTOR_ACC_TYPE) * a.s##5; \
        c += CONVERT(b##6, VECTOR_ACC_TYPE) * a.s##6; \
        c += CONVERT(b##7, VECTOR_ACC_TYPE) * a.s##7; \
    })
#define ARM_MUL_N0X16(VECTOR_ACC_TYPE, a, b, c)       \
    ({                                                \
        ARM_MUL_N0X8(VECTOR_ACC_TYPE, a, b, c);       \
        c += CONVERT(b##8, VECTOR_ACC_TYPE) * a.s##8; \
        c += CONVERT(b##9, VECTOR_ACC_TYPE) * a.s##9; \
        c += CONVERT(b##A, VECTOR_ACC_TYPE) * a.s##A; \
        c += CONVERT(b##B, VECTOR_ACC_TYPE) * a.s##B; \
        c += CONVERT(b##C, VECTOR_ACC_TYPE) * a.s##C; \
        c += CONVERT(b##D, VECTOR_ACC_TYPE) * a.s##D; \
        c += CONVERT(b##E, VECTOR_ACC_TYPE) * a.s##E; \
        c += CONVERT(b##F, VECTOR_ACC_TYPE) * a.s##F; \
    })
/** Specialized macros to perform a partial matrix multiplication with dimensions M0,N0,K0 */
#define ARM_MM_NATIVE_N0XK0X1(VECTOR_ACC_TYPE, k0, a, b, c)    \
    ({                                                         \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##0), b, (c##0)); \
    })
#define ARM_MM_NATIVE_N0XK0X2(VECTOR_ACC_TYPE, k0, a, b, c)    \
    ({                                                         \
        ARM_MM_NATIVE_N0XK0X1(VECTOR_ACC_TYPE, k0, a, b, c);   \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##1), b, (c##1)); \
    })
#define ARM_MM_NATIVE_N0XK0X3(VECTOR_ACC_TYPE, k0, a, b, c)    \
    ({                                                         \
        ARM_MM_NATIVE_N0XK0X2(VECTOR_ACC_TYPE, k0, a, b, c);   \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##2), b, (c##2)); \
    })
#define ARM_MM_NATIVE_N0XK0X4(VECTOR_ACC_TYPE, k0, a, b, c)    \
    ({                                                         \
        ARM_MM_NATIVE_N0XK0X3(VECTOR_ACC_TYPE, k0, a, b, c);   \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##3), b, (c##3)); \
    })
#define ARM_MM_NATIVE_N0XK0X5(VECTOR_ACC_TYPE, k0, a, b, c)    \
    ({                                                         \
        ARM_MM_NATIVE_N0XK0X4(VECTOR_ACC_TYPE, k0, a, b, c);   \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##4), b, (c##4)); \
    })
#define ARM_MM_NATIVE_N0XK0X6(VECTOR_ACC_TYPE, k0, a, b, c)    \
    ({                                                         \
        ARM_MM_NATIVE_N0XK0X5(VECTOR_ACC_TYPE, k0, a, b, c);   \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##5), b, (c##5)); \
    })
#define ARM_MM_NATIVE_N0XK0X7(VECTOR_ACC_TYPE, k0, a, b, c)    \
    ({                                                         \
        ARM_MM_NATIVE_N0XK0X6(VECTOR_ACC_TYPE, k0, a, b, c);   \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##6), b, (c##6)); \
    })
#define ARM_MM_NATIVE_N0XK0X8(VECTOR_ACC_TYPE, k0, a, b, c)    \
    ({                                                         \
        ARM_MM_NATIVE_N0XK0X7(VECTOR_ACC_TYPE, k0, a, b, c);   \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##7), b, (c##7)); \
    })
#define ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, a, b, c) \
    ({                                              \
        CONCAT(ARM_MUL_N0X, k0)                     \
        (VECTOR_ACC_TYPE, (a), b, (c));             \
    })
#define ARM_MM_NATIVE_N0XK0XM0(VECTOR_ACC_TYPE, m0, k0, a, b, c) \
    ({                                                           \
        CONCAT(ARM_MM_NATIVE_N0XK0X, m0)                         \
        (VECTOR_ACC_TYPE, k0, a, b, c);                          \
    })
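// Illustrative expansion (not compiled; a/b/c are placeholder name prefixes):
// ARM_MM_NATIVE_N0XK0XM0(VEC_DATA_TYPE(ACC_DATA_TYPE, N0), 4, 8, a, b, c) dispatches to
// ARM_MM_NATIVE_N0XK0X4, which applies ARM_MUL_N0XK0 to each LHS row a0..a3 in turn; every call
// accumulates the converted RHS row fragments b0..b7, each scaled by the matching scalar lane of
// that LHS row, into the N0-wide accumulators c0..c3. This vector-times-scalar formulation is the
// fallback used when the dot-product path is not selected (see gemmlowp_mm_native below for Midgard).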
292
Manuel Bottini8cf753f2020-10-21 12:34:38 +0100293#if defined(M0) && defined(N0) && defined(K0) && defined(V0) && defined(H0) && defined(M) && defined(N) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
Sheri Zhang28287af2020-02-25 14:13:54 +0000294/** This OpenCL kernel computes the matrix multiplication between 2 matrices with QASYMM/QASYMM_SIGNED data type.
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +0000295 * The LHS matrix must be reshaped with @ref CLGEMMReshapeLHSMatrixKernel and the M0xK0 must be NOT transposed
296 * The RHS matrix must be reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the K0xN0 must be transposed
297 *
Michele Di Giorgiof9179d32019-11-27 16:17:30 +0000298 * @note The input data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=uchar)
299 * @note The accumulator data type must be passed at compile time using -DACC_DATA_TYPE (i.e. -DACC_DATA_TYPE=uint)
Gian Marco Iodiceb0c50372019-03-15 10:13:05 +0000300 * @note If the first two dimensions of NDRange have been dispatched with "dummy_work_items" support, the option -DDUMMY_WORK_ITEMS must be passed at compile time.
301 * @note The GEMM's dimensions M and N must be passed at compile time using -DM and -DN (i.e. -DM=52 and -DN=90).
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +0000302 * @note The block's dimensions used for reshaping the LHS matrix and the RHS matrix (M0, N0 and K0) must be passed at compile time using -DM0, -DN0 and -DK0 (i.e. -DM0=4, -DN0=8, -DK0=4).
303 * @note The number of M0xK0 vertical blocks stored on the same output row of the reshaped LHS matrix must be passed at compile time using -DV0 (i.e. -DV0=2)
304 * @note The number of K0xN0 horizontal blocks stored on the same output row of the reshaped RHS matrix must be passed at compile time using -DH0 (i.e. -DH0=2)
305 * @note If the M0xK0 blocks in the reshaped LHS matrix have been interleaved, the option -DLHS_INTERLEAVE must passed at compile time.
306 * @note If the K0xN0 blocks in the reshaped RHS matrix have been interleaved, the option -DRHS_INTERLEAVE must passed at compile time.
307 * @note Only the following configurations of M0, N0 and K0 are currently supported:
308 * - M0 = 2, 3, 4, 5, 6, 7, 8
309 * - N0 = 2, 3, 4, 8, 16
310 * - K0 = 2, 3, 4, 8, 16
Gian Marco Iodice62251f72019-03-11 16:07:12 +0000311 * - V0 >= 1
312 * - H0 >= 1
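 *
 * @note Example (illustrative only, any values respecting the constraints above work): a possible
 *       build-option set for this kernel is "-DDATA_TYPE=uchar -DACC_DATA_TYPE=uint -DM=64 -DN=64
 *       -DM0=4 -DN0=4 -DK0=16 -DV0=2 -DH0=1 -DPARTIAL_STORE_M0=0 -DPARTIAL_STORE_N0=0"; the K
 *       dimension is passed at runtime through the "k" kernel argument.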
 *
 * @note In case the output has to be reinterpreted as a 3D tensor (i.e. output of convolution layer), the following information must be passed at compile time:
 *       -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
 *       -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
 *       -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
 *          (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns LHS matrix NOT reshaped
 *
 * @param[in] lhs_ptr Pointer to the LHS reshaped matrix. Supported data type: QASYMM8/QASYMM8_SIGNED
 * @param[in] lhs_stride_x Stride of the LHS reshaped matrix in X dimension (in bytes)
 * @param[in] lhs_step_x src_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] lhs_stride_y Stride of the LHS reshaped matrix in Y dimension (in bytes)
 * @param[in] lhs_step_y src_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the LHS reshaped matrix
 * @param[in] rhs_ptr Pointer to the RHS reshaped matrix. Supported data type: same as @p lhs_ptr
 * @param[in] rhs_stride_x Stride of the RHS reshaped matrix in X dimension (in bytes)
 * @param[in] rhs_step_x src_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] rhs_stride_y Stride of the RHS reshaped matrix in Y dimension (in bytes)
 * @param[in] rhs_step_y src_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the RHS reshaped matrix
 * @param[out] dst_ptr Pointer to the destination matrix. Supported data type: S32
 * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
 * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
 * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
 * @param[in] k Number of columns in LHS matrix and rows in RHS matrix not reshaped.
 * @param[in] lhs_stride_z Stride of the LHS reshaped matrix in Z dimension (in bytes)
 * @param[in] rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes)
 * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
 * @param[in] dst_cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
 */
__kernel void gemmlowp_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs),
                                                IMAGE_DECLARATION(rhs),
                                                IMAGE_DECLARATION(dst),
                                                uint k,
                                                uint lhs_stride_z,
                                                uint rhs_stride_z,
                                                uint dst_stride_z
#if defined(REINTERPRET_OUTPUT_AS_3D)
                                                ,
                                                uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
                                               )
{
    // Block size
#define LHS_BLOCK_SIZE ((K0) * (M0))

#if defined(LHS_INTERLEAVE)
#define LHS_OFFSET_X (K0)
#define LHS_STEP_X ((K0) * (V0))
#define LHS_STEP_LOOP (1)
#else // defined(LHS_INTERLEAVE)
#define LHS_OFFSET_X (LHS_BLOCK_SIZE)
#define LHS_STEP_X (K0)
#define LHS_STEP_LOOP (V0)
#endif // defined(LHS_INTERLEAVE)

    // Block size
#define RHS_BLOCK_SIZE ((K0) * (N0))

    // RHS offset and step X
#if defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (K0)
#define RHS_STEP_X ((K0) * (H0))
#define RHS_STEP_LOOP (1)
#else // defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (RHS_BLOCK_SIZE)
#define RHS_STEP_X (K0)
#define RHS_STEP_LOOP (H0)
#endif // defined(RHS_INTERLEAVE)

    uint x = get_global_id(0);
    uint y = get_global_id(1);
    uint z = get_global_id(2);

#if defined(DUMMY_WORK_ITEMS)
    if((x * N0 >= N) || (y * M0 >= M))
    {
        return;
    }
#endif // defined(DUMMY_WORK_ITEMS)

    // Compute LHS matrix address
    __global DATA_TYPE *lhs_addr = (__global DATA_TYPE *)(lhs_ptr + lhs_offset_first_element_in_bytes + (y % V0) * (uint)LHS_OFFSET_X + (y / V0) * (uint)lhs_stride_y + (z * lhs_stride_z));

    // Compute RHS matrix address
    __global DATA_TYPE *rhs_addr = (__global DATA_TYPE *)(rhs_ptr + rhs_offset_first_element_in_bytes + (x % H0) * (uint)RHS_OFFSET_X + (x / (uint)H0) * rhs_stride_y);

#if defined(MATRIX_B_DEPTH)
    // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
    rhs_addr += (z % MATRIX_B_DEPTH) * rhs_stride_z;
#else // defined(MATRIX_B_DEPTH)
    rhs_addr += z * rhs_stride_z;
#endif // defined(MATRIX_B_DEPTH)

    REPEAT_VAR_INIT_TO_CONST(8, uint, zlhs, 0); //uint zlhs0=0,zlhs1=0,zlhs2=0,... zlhs7=0;
    REPEAT_VAR_INIT_TO_CONST(16, uint, zrhs, 0);

    // Initialize the accumulators
    REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(ACC_DATA_TYPE, N0), c, 0); //VEC_DATA_TYPE(ACC_DATA_TYPE, N0) c0=0,c1=0,c2=0,... c(M0-1)=0;

    for(int i = 0; i < k; i += K0)
    {
        // Load values from LHS matrix
        LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_addr, 0, LHS_STEP_X, zlhs);

        // Load values from RHS matrix
        LOAD_BLOCK(N0, K0, DATA_TYPE, b, rhs_addr, 0, RHS_STEP_X, zrhs);

        // Partial matrix multiplication M0,N0,K0
        ARM_MM_K0XN0XM0(M0, N0, K0, a, b, c);

        // Update address
        lhs_addr += (M0 * LHS_STEP_X * LHS_STEP_LOOP);
        rhs_addr += (N0 * RHS_STEP_X * RHS_STEP_LOOP);
    }

    __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(int)) + (y * (uint)M0 * dst_stride_y);

    REPEAT_VAR_INIT_TO_CONST(8, uint, zout, 0); //uint zout0=0,zout1=0,zout2=0,... zout7=0;

#if defined(REINTERPRET_OUTPUT_AS_3D)
    // The plane (zout) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
    CALCULATE_Z_OFFSET(M0, uint, zout, y * M0, HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y);

    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
    // multiply dst_stride_z by DEPTH_GEMM3D
    dst_addr += z * dst_stride_z * DEPTH_GEMM3D;

#else // defined(REINTERPRET_OUTPUT_AS_3D)

    // Add offset for batched GEMM
    dst_addr += z * dst_stride_z;

#endif // defined(REINTERPRET_OUTPUT_AS_3D)

    // Boundary conditions for the store
    const bool cond_y = ((get_global_id(1) + 1) * M0 >= M);
    const bool cond_x = ((get_global_id(0) + 1) * N0 >= N);

    // Convert and store output block
    REPEAT_VAR_INIT_CONVERT_SAT(M0, VEC_DATA_TYPE(int, N0), c, c_lp);
    STORE_BLOCK_BOUNDARY_AWARE(M0, N0, int, c_lp, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);

#undef LHS_BLOCK_SIZE
#undef LHS_OFFSET_X
#undef LHS_STEP_X
#undef RHS_BLOCK_SIZE
#undef RHS_OFFSET_X
#undef RHS_STEP_X
}
#endif // defined(M0) && defined(N0) && defined(K0) && defined(V0) && defined(H0) && defined(M) && defined(N) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)

#if defined(M0) && defined(N0) && defined(K0) && defined(H0) && defined(K) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)

#if defined(RESULT_OFFSET) && defined(RESULT_MULTIPLIER) && defined(RESULT_SHIFT)
#define FUSED_OUTPUT_STAGE_FIXED_POINT
#endif // defined(RESULT_OFFSET) && defined(RESULT_MULTIPLIER) && defined(RESULT_SHIFT)

/** This OpenCL kernel computes the matrix multiplication between 2 matrices, optionally fusing the output stage using fixed-point arithmetic.
 * The LHS matrix is NOT reshaped
 * The RHS matrix is reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the block K0xN0 is transposed
 *
 * @note The input data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=uchar)
 * @note The accumulator data type must be passed at compile time using -DACC_DATA_TYPE (i.e. -DACC_DATA_TYPE=uint)
 * @note The number of columns of LHS matrix must be passed at compile time using -DK (i.e. -DK=64)
 * @note The block's dimensions used for reshaping the RHS matrix (N0 and K0) must be passed at compile time using -DN0 and -DK0 (i.e. -DN0=8, -DK0=4).
 * @note The number of M0 rows to process must be passed at compile time using -DM0 (i.e. -DM0=2)
 * @note The number of K0xN0 horizontal blocks stored on the same output row of the reshaped RHS matrix must be passed at compile time using -DH0 (i.e. -DH0=2)
 * @note If the K0xN0 blocks in the reshaped RHS matrix have been interleaved, the option -DRHS_INTERLEAVE must be passed at compile time.
 * @note Only the following configurations of M0, N0 and K0 are currently supported:
 *  - M0 = 1, 2, 3, 4, 5, 6, 7, 8
 *  - N0 = 2, 3, 4, 8, 16
 *  - K0 = 2, 3, 4, 8, 16
 *  - H0 >= 1
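 *
 * @note Example (illustrative only): a possible build-option set for the plain (S32 output) variant
 *       would be "-DDATA_TYPE=uchar -DACC_DATA_TYPE=uint -DM=32 -DN=32 -DK=64 -DM0=4 -DN0=4 -DK0=16
 *       -DH0=1 -DPARTIAL_STORE_M0=0 -DPARTIAL_STORE_N0=0". Defining RESULT_OFFSET, RESULT_MULTIPLIER
 *       and RESULT_SHIFT (together with the offset-contribution defines used below, e.g. K_OFFSET)
 *       selects the fused fixed-point output-stage variant instead.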
 *
 * @note In case the input or output have to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
 *       -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
 *       -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
 *       -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
 *       -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
 *          (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns LHS matrix
 *
 * @note The offset, scalar scale factor and number of bits to shift right of output tensor must be passed at compile time using -DRESULT_OFFSET, -DRESULT_MULTIPLIER and -DRESULT_SHIFT
 * @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
 * @note The output datatype should be passed at compile time using -DOUTPUT_DATA_TYPE
 * @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
 *       These values can be used to implement "rectified linear unit" activation functions
 * @note In case of per-channel quantization of matrix B, -DPER_CHANNEL_QUANTIZATION must be passed at compile time.
 *
 * @param[in] lhs_ptr Pointer to the LHS reshaped matrix. Supported data type: QASYMM8/QASYMM8_SIGNED
 * @param[in] lhs_stride_x Stride of the LHS reshaped matrix in X dimension (in bytes)
 * @param[in] lhs_step_x src_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] lhs_stride_y Stride of the LHS reshaped matrix in Y dimension (in bytes)
 * @param[in] lhs_step_y src_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the LHS reshaped matrix
 * @param[in] rhs_ptr Pointer to the RHS reshaped matrix. Supported data type: same as @p lhs_ptr
 * @param[in] rhs_stride_x Stride of the RHS reshaped matrix in X dimension (in bytes)
 * @param[in] rhs_step_x src_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] rhs_stride_y Stride of the RHS reshaped matrix in Y dimension (in bytes)
 * @param[in] rhs_step_y src_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the RHS reshaped matrix
 * @param[out] dst_ptr Pointer to the destination matrix. Supported data type: same as @p lhs_ptr
 * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
 * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
 * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
 * @param[in] lhs_stride_z Stride of the LHS reshaped matrix in Z dimension (in bytes)
 * @param[in] rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes)
 * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
 * @param[in] lhs_cross_plane_pad (Optional) Bottom paddings for LHS matrix in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
 * @param[in] dst_cross_plane_pad (Optional) Bottom paddings for the output matrix in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
 * @param[in] sum_col_ptr (Optional) Pointer to the source tensor. Supported data type: S32
 * @param[in] sum_col_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
 * @param[in] sum_col_step_x (Optional) sum_col_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] sum_col_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
 * @param[in] sum_col_step_y (Optional) sum_col_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in] sum_col_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
 * @param[in] sum_row_ptr (Optional) Pointer to the source tensor. Supported data type: S32
 * @param[in] sum_row_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
 * @param[in] sum_row_step_x (Optional) sum_row_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] sum_row_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
 * @param[in] sum_row_step_y (Optional) sum_row_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in] sum_row_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
 * @param[in] biases_ptr (Optional) Pointer to the biases tensor. Supported data type: S32
 * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
 * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
 * @param[in] result_multipliers_ptr (Optional) Pointer to the output multipliers vector for per-channel quantization. Supported data types: S32
 * @param[in] result_multipliers_stride_x (Optional) Stride of the output multipliers vector in X dimension (in bytes)
 * @param[in] result_multipliers_step_x (Optional) output_multipliers_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] result_multipliers_offset_first_element_in_bytes (Optional) The offset of the first element in the output multipliers vector
 * @param[in] result_shifts_ptr (Optional) Pointer to the output shifts vector for per-channel quantization. Supported data types: S32
 * @param[in] result_shifts_stride_x (Optional) Stride of the output shifts vector in X dimension (in bytes)
 * @param[in] result_shifts_step_x (Optional) output_shifts_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] result_shifts_offset_first_element_in_bytes (Optional) The offset of the first element in the output shifts vector
 */
#if defined(FUSED_OUTPUT_STAGE_FIXED_POINT)
__kernel void gemmlowp_mm_reshaped_only_rhs_t_fused_output_stage_fixedpoint
#else // defined(FUSED_OUTPUT_STAGE_FIXED_POINT)
__kernel void gemmlowp_mm_reshaped_only_rhs_t
#endif // defined(FUSED_OUTPUT_STAGE_FIXED_POINT)
(IMAGE_DECLARATION(lhs),
 IMAGE_DECLARATION(rhs),
 IMAGE_DECLARATION(dst),
 uint lhs_stride_z,
 uint rhs_stride_z,
 uint dst_stride_z
#if defined(REINTERPRET_INPUT_AS_3D)
 ,
 uint lhs_cross_plane_pad
#endif // REINTERPRET_INPUT_AS_3D
#if defined(REINTERPRET_OUTPUT_AS_3D)
 ,
 uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
#if defined(A_OFFSET)
 ,
 IMAGE_DECLARATION(sum_col)
#endif // defined(A_OFFSET)
#if defined(B_OFFSET)
 ,
 IMAGE_DECLARATION(sum_row)
#endif // defined(B_OFFSET)
#if defined(ADD_BIAS)
 ,
 VECTOR_DECLARATION(biases)
#endif // defined(ADD_BIAS)
#if defined(PER_CHANNEL_QUANTIZATION)
 ,
 VECTOR_DECLARATION(result_multipliers),
 VECTOR_DECLARATION(result_shifts)
#endif // defined(PER_CHANNEL_QUANTIZATION)
)
{
    // @note: replace with (DIMENSION + PAD) once we pass the relevant info at compile time
#define FULL_LHS_HEIGHT (lhs_stride_z / lhs_stride_y)
#define FULL_DST_HEIGHT (dst_stride_z / dst_stride_y)

    // RHS offset and step X
#if defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (K0)
#define RHS_STEP_X (K0 * H0)
#else // defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (K0 * N0)
#define RHS_STEP_X (K0)
#endif // defined(RHS_INTERLEAVE)
#define RHS_STEP_LOOP (N0 * K0 * H0)

    uint x  = GET_SPATIAL_IDX(0, 1, 1);
    uint y  = GET_SPATIAL_IDX(1, M0, PARTIAL_STORE_M0);
    uint z  = GET_SPATIAL_IDX(2, 1, 1);
    int  xo = (x * N0);

#if defined(DUMMY_WORK_ITEMS)
    if((xo >= N) || (y >= M))
    {
        return;
    }
#endif // defined(DUMMY_WORK_ITEMS)

    // Compute LHS matrix address
    uint lhs_y = y + z * FULL_LHS_HEIGHT;

    // Compute RHS matrix address
    uint rhs_offset_x = (x % H0) * RHS_OFFSET_X;
    uint rhs_offset_y = (x / H0) * rhs_stride_y;

#if defined(MATRIX_B_DEPTH)
    // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
    rhs_offset_y += (z % MATRIX_B_DEPTH) * rhs_stride_z;
#else // defined(MATRIX_B_DEPTH)
    rhs_offset_y += z * rhs_stride_z;
#endif // defined(MATRIX_B_DEPTH)

    // Initialize the accumulators
    TILE(ACC_DATA_TYPE, M0, N0, c);
    LOOP_UNROLLING(int, i, 0, 1, M0,
    {
        c[i].v = 0;
    })

    int i = 0;
    for(; i <= (K - K0); i += K0)
    {
        TILE(DATA_TYPE, M0, K0, a);
        TILE(DATA_TYPE, N0, K0, b);

        // Load values from LHS matrix
        T_LOAD(DATA_TYPE, M0, K0, BUFFER, lhs, i, lhs_y, 1, lhs_stride_y, a);

        // Load values from RHS matrix
        LOOP_UNROLLING(int, _i, 0, 1, N0,
        {
            b[_i].v = VLOAD(K0)(0, (__global DATA_TYPE *)(rhs_ptr + rhs_offset_first_element_in_bytes + rhs_offset_x + rhs_offset_y + _i * RHS_STEP_X));
        })

        // Partial matrix multiplication M0,N0,K0
        T_MMUL(DATA_TYPE, DATA_TYPE, ACC_DATA_TYPE, M0, N0, K0, NT, T, a, b, c);

        rhs_offset_x += RHS_STEP_LOOP;
    }

#if((K % K0) != 0)

    // Left-over accumulations
    for(; i < K; ++i)
    {
        TILE(DATA_TYPE, M0, 1, a);
        TILE(DATA_TYPE, N0, 1, b);

        // Load values from LHS matrix
        T_LOAD(DATA_TYPE, M0, 1, BUFFER, lhs, i, lhs_y, 1, lhs_stride_y, a);

        // Load values from RHS matrix
        LOOP_UNROLLING(int, _i, 0, 1, N0,
        {
            b[_i].v = *(__global DATA_TYPE *)(rhs_ptr + rhs_offset_first_element_in_bytes + rhs_offset_x + rhs_offset_y + _i * RHS_STEP_X);
        })

        T_MMUL(DATA_TYPE, DATA_TYPE, ACC_DATA_TYPE, M0, N0, 1, NT, T, a, b, c);

        rhs_offset_x += 1;
    }
#endif // ((K % K0) != 0)

#if defined(FUSED_OUTPUT_STAGE_FIXED_POINT)

    TILE(int, M0, N0, c_int);
    TILE(int, M0, N0, offset_s32);
    LOOP_UNROLLING(int, i, 0, 1, M0,
    {
        offset_s32[i].v = (VEC_DATA_TYPE(int, N0))K_OFFSET;
    })

    LOOP_UNROLLING(int, i, 0, 1, M0,
    {
        c_int[i].v = CONVERT_SAT(c[i].v, VEC_DATA_TYPE(int, N0));
    })

#if defined(A_OFFSET)

#if defined(SUM_COL_HAS_BATCHES)
    int sum_col_y = z;
#else // defined(SUM_COL_HAS_BATCHES)
    int sum_col_y = 0;
#endif // defined(SUM_COL_HAS_BATCHES)
    TILE(int, 1, N0, a_offset_s32);

    T_LOAD(int, 1, N0, BUFFER, sum_col, xo, sum_col_y, 1, sum_col_stride_y, a_offset_s32);

    a_offset_s32[0].v *= A_OFFSET;

    T_ADD_BROADCAST_X(int, M0, N0, offset_s32, a_offset_s32, offset_s32);
#endif // defined(A_OFFSET)

#if defined(B_OFFSET)
    // Compute the offset contribution due to B_OFFSET
    // Note: The sum_row tensor is generated through CLGEMMLowpMatrixAReductionKernel which
    // does not introduce paddings. For this reason it is safe to access the tensor in this manner
    // without considering that the coordinate "y" could come from an input 3D tensor
    TILE(int, M0, N0, b_offset_s32);

    T_LOAD(int, M0, 1, BUFFER, sum_row, y + z * (sum_row_stride_y / sizeof(int)), 0, 1, sum_row_stride_x, b_offset_s32);

    LOOP_UNROLLING(int, i, 0, 1, M0,
    {
        offset_s32[i].v += b_offset_s32[i].v * B_OFFSET;
    })

#endif // defined(B_OFFSET)

#if defined(ADD_BIAS)

    TILE(int, 1, N0, bias);

    T_LOAD(int, 1, N0, BUFFER, biases, xo, 0, 1, 0, bias);

    T_ADD_BROADCAST_X(int, M0, N0, offset_s32, bias, offset_s32);
#endif // defined(ADD_BIAS)

    LOOP_UNROLLING(int, i, 0, 1, M0,
    {
        c_int[i].v += offset_s32[i].v;
    })

    TILE(DATA_TYPE, M0, N0, c_lp);

    // Multiply by result_mult_int and shift
#if defined(PER_CHANNEL_QUANTIZATION)
    TILE(int, 1, N0, res_mul);
    TILE(int, 1, N0, res_shift);

    T_LOAD(int, 1, N0, BUFFER, result_multipliers, xo, 0, 0, 0, res_mul);
    T_LOAD(int, 1, N0, BUFFER, result_shifts, xo, 0, 0, 0, res_shift);

    T_QUANTIZE8(int, DATA_TYPE, PER_CHANNEL, M0, N0, RESULT_OFFSET, RESULT_SHIFT, RESULT_MULTIPLIER, c_int, res_mul, res_shift, c_lp);
#else // defined(PER_CHANNEL_QUANTIZATION)
    T_QUANTIZE8(int, DATA_TYPE, PER_TENSOR, M0, N0, RESULT_OFFSET, RESULT_SHIFT, RESULT_MULTIPLIER, c_int, 0, 0, c_lp);
#endif // defined(PER_CHANNEL_QUANTIZATION)

#if defined(MIN_BOUND)
    LOOP_UNROLLING(int, i, 0, 1, M0,
    {
        c_lp[i].v = max(c_lp[i].v, (VEC_DATA_TYPE(DATA_TYPE, N0))MIN_BOUND);
    })
#endif // defined(MIN_BOUND)
#if defined(MAX_BOUND)
    LOOP_UNROLLING(int, i, 0, 1, M0,
    {
        c_lp[i].v = min(c_lp[i].v, (VEC_DATA_TYPE(DATA_TYPE, N0))MAX_BOUND);
    })
#endif // defined(MAX_BOUND)

#else // defined(FUSED_OUTPUT_STAGE_FIXED_POINT)
    TILE(int, M0, N0, c_lp);

    LOOP_UNROLLING(int, i, 0, 1, M0,
    {
        c_lp[i].v = CONVERT_SAT(c[i].v, VEC_DATA_TYPE(int, N0));
    })
#endif // defined(FUSED_OUTPUT_STAGE_FIXED_POINT)

    TILE(uint, M0, 1, dst_indirect_y);

    LOOP_UNROLLING(int, i, 0, 1, M0,
    {
#if defined(REINTERPRET_OUTPUT_AS_3D)
        dst_indirect_y[i].v = (uint)min((int)((y + i) % HEIGHT_GEMM3D), (int)HEIGHT_GEMM3D - 1);
        dst_indirect_y[i].v += (uint)min((int)((y + i) / HEIGHT_GEMM3D), (int)DEPTH_GEMM3D - 1) * FULL_DST_HEIGHT;
        dst_indirect_y[i].v += z * FULL_DST_HEIGHT * DEPTH_GEMM3D;
#else // (REINTERPRET_OUTPUT_AS_3D)
        dst_indirect_y[i].v = (uint)min((int)y + i, (int)M - 1) + z * FULL_DST_HEIGHT;
#endif // defined(REINTERPRET_OUTPUT_AS_3D)
    })

    const bool cond_x = (xo > (N - N0)) & (PARTIAL_STORE_N0 != 0);

#if defined(FUSED_OUTPUT_STAGE_FIXED_POINT)
    T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, xo, dst_stride_y, cond_x, c_lp, dst_indirect_y);
#else // defined(FUSED_OUTPUT_STAGE_FIXED_POINT)
    T_STORE_INDIRECT_WIDTH_SELECT(int, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, xo, dst_stride_y, cond_x, c_lp, dst_indirect_y);
#endif // defined(FUSED_OUTPUT_STAGE_FIXED_POINT)

#undef RHS_OFFSET_X
#undef RHS_STEP_X
#undef RHS_STEP_LOOP
}
#endif // defined(M0) && defined(N0) && defined(K0) && defined(H0) && defined(K) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)

#if defined(M0) && defined(N0) && defined(K0) && defined(K) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)

/** This OpenCL kernel computes the matrix multiplication between 2 matrices.
 * The LHS matrix is NOT reshaped
 * The RHS matrix is NOT reshaped
 *
 * @note The input data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=uchar)
 * @note The accumulator data type must be passed at compile time using -DACC_DATA_TYPE (i.e. -DACC_DATA_TYPE=uint)
 * @note The number of columns of LHS matrix must be passed at compile time using -DK (i.e. -DK=64)
 * @note The number of M0 rows to process must be passed at compile time using -DM0 (i.e. -DM0=2)
 * @note The number of N0 columns to process must be passed at compile time using -DN0 (i.e. -DN0=2)
 * @note The number of K0 partial accumulations must be passed at compile time using -DK0 (i.e., -DK0=2)
 * @note Only the following configurations of M0, N0 and K0 are currently supported:
 *  - M0 = 1, 2, 3, 4, 5, 6, 7, 8
 *  - N0 = 2, 3, 4, 8, 16
 *  - K0 = 2, 3, 4, 8, 16
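 *
 * @note Example (illustrative only): a possible build-option set for this kernel would be
 *       "-DDATA_TYPE=uchar -DACC_DATA_TYPE=uint -DM=33 -DN=34 -DK=36 -DM0=4 -DN0=4 -DK0=16
 *       -DPARTIAL_STORE_M0=1 -DPARTIAL_STORE_N0=2", where PARTIAL_STORE_M0/PARTIAL_STORE_N0 describe
 *       the leftover boundary block sizes (assumed here to be M % M0 and N % N0).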
 *
 * @note In case the input or output have to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
 *       -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
 *       -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
 *       -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
 *       -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
 *          (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns LHS matrix
 *
 * @param[in] lhs_ptr Pointer to the LHS matrix. Supported data type: QASYMM8
 * @param[in] lhs_stride_x Stride of the LHS matrix in X dimension (in bytes)
 * @param[in] lhs_step_x src_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] lhs_stride_y Stride of the LHS matrix in Y dimension (in bytes)
 * @param[in] lhs_step_y src_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the LHS matrix
 * @param[in] rhs_ptr Pointer to the RHS matrix. Supported data type: same as @p lhs_ptr
 * @param[in] rhs_stride_x Stride of the RHS matrix in X dimension (in bytes)
 * @param[in] rhs_step_x src_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] rhs_stride_y Stride of the RHS matrix in Y dimension (in bytes)
 * @param[in] rhs_step_y src_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the RHS matrix
 * @param[out] dst_ptr Pointer to the destination matrix. Supported data type: S32
 * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
 * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
 * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
 * @param[in] lhs_stride_z Stride of the LHS matrix in Z dimension (in bytes)
 * @param[in] rhs_stride_z Stride of the RHS matrix in Z dimension (in bytes)
 * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
 * @param[in] lhs_cross_plane_pad (Optional) Bottom paddings for LHS matrix in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
 * @param[in] dst_cross_plane_pad (Optional) Bottom paddings for the output matrix in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
 */
851__kernel void gemmlowp_mm_native(IMAGE_DECLARATION(lhs),
852 IMAGE_DECLARATION(rhs),
853 IMAGE_DECLARATION(dst),
854 uint lhs_stride_z,
855 uint rhs_stride_z,
856 uint dst_stride_z
857#if defined(REINTERPRET_INPUT_AS_3D)
858 ,
859 uint lhs_cross_plane_pad
860#endif // REINTERPRET_INPUT_AS_3D
861#if defined(REINTERPRET_OUTPUT_AS_3D)
862 ,
863 uint dst_cross_plane_pad
864#endif // REINTERPRET_OUTPUT_AS_3D
865 )
866{
867 uint x = get_global_id(0);
868 uint y = get_global_id(1);
869 uint z = get_global_id(2);
870
871#if defined(DUMMY_WORK_ITEMS)
872 if((x * N0 >= N) || (y * M0 >= M))
873 {
874 return;
875 }
876#endif // defined(DUMMY_WORK_ITEMS)
877
878 // Compute LHS matrix address
morgolockcf343e32020-10-12 14:00:43 +0100879 uint lhs_offset = lhs_offset_first_element_in_bytes + COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * (uint)lhs_stride_y;
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100880
881 // Compute RHS matrix address
morgolockcf343e32020-10-12 14:00:43 +0100882 uint rhs_offset = rhs_offset_first_element_in_bytes + x * N0 * sizeof(DATA_TYPE);
883
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100884#if defined(MATRIX_B_DEPTH)
885 // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
886 rhs_offset += (z % MATRIX_B_DEPTH) * rhs_stride_z;
887#else // defined(MATRIX_B_DEPTH)
888 rhs_offset += z * rhs_stride_z;
889#endif // defined(MATRIX_B_DEPTH)
890
891 REPEAT_VAR_INIT_TO_CONST(8, uint, zlhs, 0);
892 REPEAT_VAR_INIT_TO_CONST(16, uint, zrhs, 0);
893
894#if defined(REINTERPRET_INPUT_AS_3D)
895 // The plane (zlhs) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
Gian Marco Iodice9ae06d42020-10-22 16:37:12 +0100896 CALCULATE_Z_OFFSET(M0, uint, zlhs, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, lhs_cross_plane_pad, lhs_stride_y);
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100897
898 // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
899 // multiply lhs_stride_z by DEPTH_GEMM3D
900 lhs_offset += z * lhs_stride_z * DEPTH_GEMM3D;
901
902#else // defined(REINTERPRET_INPUT_AS_3D)
903
904 // Add offset for batched GEMM
905 lhs_offset += z * lhs_stride_z;
906
907#endif // defined(REINTERPRET_INPUT_AS_3D)
908
909 // Initialize the accumulators
Michele Di Giorgiof9179d32019-11-27 16:17:30 +0000910 REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(ACC_DATA_TYPE, N0), c, 0); //VEC_DATA_TYPE(ACC_DATA_TYPE, N0) c0=0,c1=0,c2=0,... c(M0-1)=0;
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100911
912 int i = 0;
913
914 for(; i <= (K - K0); i += K0)
915 {
916 // Load values from LHS matrix
Michele Di Giorgiof9179d32019-11-27 16:17:30 +0000917 LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs);
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100918
919 // Load values from RHS matrix
Michele Di Giorgiof9179d32019-11-27 16:17:30 +0000920 LOAD_BLOCK(K0, N0, DATA_TYPE, b, rhs_ptr, rhs_offset, rhs_stride_y, zrhs);
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100921
SiCong Li738893e2020-05-01 12:55:16 +0100922 // Partial matrix multiplication M0,N0,K0
923#if(GPU_ARCH == GPU_ARCH_MIDGARD)
924 ARM_MM_NATIVE_N0XK0XM0(VEC_DATA_TYPE(ACC_DATA_TYPE, N0), M0, K0, a, b, c);
925#else // GPU_ARCH == GPU_ARCH_MIDGARD
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100926 // Transpose the values from RHS matrix
Michele Di Giorgiof9179d32019-11-27 16:17:30 +0000927 TRANSPOSE_K0XN0(K0, N0, b_t, b, DATA_TYPE);
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100928
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100929 ARM_MM_K0XN0XM0(M0, N0, K0, a, b_t, c);
SiCong Li738893e2020-05-01 12:55:16 +0100930#endif // GPU_ARCH == GPU_ARCH_MIDGARD
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100931
932 // Update the offset
933 lhs_offset += K0;
934 rhs_offset += K0 * rhs_stride_y;
935 }
936
937 // Left-over for loop
938 for(; i < K; ++i)
939 {
940 // Load values from LHS matrix
Michele Di Giorgiof9179d32019-11-27 16:17:30 +0000941 LOAD_BLOCK(M0, 1, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs);
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100942
943 // Load values from RHS matrix
Michele Di Giorgiof9179d32019-11-27 16:17:30 +0000944 LOAD_BLOCK(1, N0, DATA_TYPE, b, rhs_ptr, rhs_offset, rhs_stride_y, zrhs);
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100945
SiCong Li738893e2020-05-01 12:55:16 +0100946 // Partial matrix multiplication M0,N0,1
947#if(GPU_ARCH == GPU_ARCH_MIDGARD)
948 ARM_MM_NATIVE_N0XK0XM0(VEC_DATA_TYPE(ACC_DATA_TYPE, N0), M0, 1, a, b, c);
949#else // GPU_ARCH == GPU_ARCH_MIDGARD
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100950 // Transpose the values from RHS matrix
Michele Di Giorgiof9179d32019-11-27 16:17:30 +0000951 TRANSPOSE_K0XN0(1, N0, b_t, b, DATA_TYPE);
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100952
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100953 ARM_MM_K0XN0XM0(M0, N0, 1, a, b_t, c);
SiCong Li738893e2020-05-01 12:55:16 +0100954#endif // GPU_ARCH == GPU_ARCH_MIDGARD
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100955
956 // Update the offset
957 lhs_offset += 1;
958 rhs_offset += rhs_stride_y;
959 }
960
morgolockcf343e32020-10-12 14:00:43 +0100961 __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(int)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * dst_stride_y);
962
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100963 REPEAT_VAR_INIT_TO_CONST(M0, uint, zout, 0); //uint zout0=0,zout1=0,zout2=0,... zout7=0;
964
965#if defined(REINTERPRET_OUTPUT_AS_3D)
966 // The plane (zout) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
Gian Marco Iodice9ae06d42020-10-22 16:37:12 +0100967 CALCULATE_Z_OFFSET(M0, uint, zout, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y);
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100968
969 // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
970 // multiply dst_stride_z by DEPTH_GEMM3D
971 dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
972
973#else // defined(REINTERPRET_OUTPUT_AS_3D)
974
975 // Add offset for batched GEMM
976 dst_addr += z * dst_stride_z;
977
978#endif // defined(REINTERPRET_OUTPUT_AS_3D)
morgolockcf343e32020-10-12 14:00:43 +0100979 const bool cond_y = y == 0;
980 const bool cond_x = ((x + 1) * N0 >= N);
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100981
Michele Di Giorgio27d92fd2020-10-27 12:44:17 +0000982 // Convert and store output block
983 REPEAT_VAR_INIT_CONVERT(M0, VEC_DATA_TYPE(int, N0), c, res); // resN = CONVERT(cN, VEC_DATA_TYPE(int, N0));
984 STORE_BLOCK_BOUNDARY_AWARE(M0, N0, int, res, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100985}
SiCong Lied5fb392020-10-20 18:07:27 +0100986#endif // defined(M0) && defined(N0) && defined(K0) && defined(K) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
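/* A small illustrative example of the boundary-aware store used above (assuming PARTIAL_STORE_N0
 * is the remainder N % N0 and PARTIAL_STORE_M0 the analogous remainder of the M dimension): with
 * N = 30 and N0 = 8, the work item for which cond_x is true owns the last block in x and only
 * PARTIAL_STORE_N0 = 6 of its N0 columns are valid, so STORE_BLOCK_BOUNDARY_AWARE writes 6 values
 * for that block instead of a full N0-wide row. Partial rows are handled analogously through
 * PARTIAL_STORE_M0 and cond_y.
 */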
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100987
Gian Marco05288a22017-11-21 10:57:50 +0000988#if defined(COLS_A)
989/** OpenCL kernel used to compute the row-vectors of sums of all the entries in each row of Matrix A.
Michele Di Giorgiof64d3362020-04-03 12:40:10 +0100990 * It is also possible to multiply each reduced row by a scalar value, if SCALAR is passed at compile time.
Gian Marco05288a22017-11-21 10:57:50 +0000991 *
992 * @note This stage is needed to handle the offset of matrix product
993 * https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
994 *
995 * @attention The number of matrix A columns needs to be passed at compile time using -DCOLS_A
Manuel Bottini959c26d2019-12-02 16:22:35 +0000996 * @note The input data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=uchar)
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +0000997 * @note The data type for the accumulation must be passed at compile time using -DACC_DATA_TYPE (i.e. -DACC_DATA_TYPE=uint)
Michele Di Giorgiof64d3362020-04-03 12:40:10 +0100998 * @note In case of scaling the scalar value must be passed at compile time using -DSCALAR (e.g. -DSCALAR=3)
Gian Marco05288a22017-11-21 10:57:50 +0000999 *
Michele Di Giorgiof6f78762020-07-06 11:27:21 +01001000 * @param[in] src_ptr Pointer to the source tensor. Supported data type: QASYMM8/QASYMM8_SIGNED/QSYMM8
Gian Marco05288a22017-11-21 10:57:50 +00001001 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
1002 * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
1003 * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
1004 * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
1005 * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
1006 * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
1007 * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
1008 * @param[out] dst_ptr                              Pointer to the destination tensor. Supported data type: S32
1009 * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
1010 * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
1011 * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
1012 * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
1013 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
1014 */
1015__kernel void gemmlowp_matrix_a_reduction(TENSOR3D_DECLARATION(src),
1016 IMAGE_DECLARATION(dst))
1017{
1018 // Compute source and destination addresses
1019 Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
1020 Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
1021
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001022 VEC_DATA_TYPE(ACC_DATA_TYPE, 4)
1023 sum_row_32 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))0;
1024 ACC_DATA_TYPE sum_row = 0;
Gian Marco05288a22017-11-21 10:57:50 +00001025
Manuel Bottini959c26d2019-12-02 16:22:35 +00001026 __global const DATA_TYPE *matrix_a = (__global const DATA_TYPE *)(src.ptr + get_global_id(0) * src_stride_y + get_global_id(1) * src_stride_z);
Gian Marco05288a22017-11-21 10:57:50 +00001027
1028 int i = 0;
1029
1030 // This for loop performs 16 accumulations
1031 for(; i <= ((int)COLS_A - 16); i += 16)
1032 {
Manuel Bottini959c26d2019-12-02 16:22:35 +00001033 const VEC_DATA_TYPE(DATA_TYPE, 16) a0 = vload16(0, matrix_a + i);
Gian Marco05288a22017-11-21 10:57:50 +00001034
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001035 sum_row_32 += CONVERT(a0.s0123, VEC_DATA_TYPE(ACC_DATA_TYPE, 4)) + CONVERT(a0.s4567, VEC_DATA_TYPE(ACC_DATA_TYPE, 4)) + CONVERT(a0.s89AB, VEC_DATA_TYPE(ACC_DATA_TYPE, 4)) + CONVERT(a0.sCDEF,
1036 VEC_DATA_TYPE(ACC_DATA_TYPE, 4));
Gian Marco05288a22017-11-21 10:57:50 +00001037 }
1038
1039 // This for loop performs the leftover accumulations
1040 for(; i < COLS_A; ++i)
1041 {
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001042 sum_row += (ACC_DATA_TYPE)matrix_a[i];
Gian Marco05288a22017-11-21 10:57:50 +00001043 }
1044
Manuel Bottini959c26d2019-12-02 16:22:35 +00001045 sum_row += sum_row_32.s0 + sum_row_32.s1 + sum_row_32.s2 + sum_row_32.s3;
Gian Marco05288a22017-11-21 10:57:50 +00001046
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001047#if defined(SCALAR)
1048 sum_row *= (int)SCALAR;
1049#endif // defined(SCALAR)
Gian Marco05288a22017-11-21 10:57:50 +00001050 *((__global int *)dst.ptr) = (int)sum_row;
1051}
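/* Worked example for the reduction above (the sizes and values are illustrative only): with
 * -DCOLS_A=5 and a row of matrix A equal to { 2, 3, 4, 5, 6 }, the kernel stores
 * 2 + 3 + 4 + 5 + 6 = 20 in the corresponding element of the destination row-sum vector;
 * if -DSCALAR=3 is also defined, the stored value becomes 20 * 3 = 60.
 */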
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001052
1053#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001054/** OpenCL kernel used to compute the row-vectors of sums of all the entries in each row of Matrix A using the arm dot product instruction.
1055 * It is also possible to multiply each reduced row by a scalar value, if SCALAR is passed at compile time.
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001056 *
1057 * @note This stage is needed to handle the offset of matrix product
1058 * https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
1059 *
1060 * @attention The number of matrix A columns needs to be passed at compile time using -DCOLS_A
Manuel Bottini959c26d2019-12-02 16:22:35 +00001061 * @note The input data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=uchar)
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001062 * @note The data type for the accumulation must be passed at compile time using -DACC_DATA_TYPE (i.e. -DACC_DATA_TYPE=uint)
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001063 * @note In case of scaling the scalar value must be passed at compile time using -DSCALAR (e.g. -DSCALAR=3)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001064 *
Michele Di Giorgiof6f78762020-07-06 11:27:21 +01001065 * @param[in] src_ptr Pointer to the source tensor. Supported data type: QASYMM8/QASYMM8_SIGNED/QSYMM8
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001066 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
1067 * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
1068 * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
1069 * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
1070 * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
1071 * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
1072 * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
1073 * @param[out] dst_ptr                               Pointer to the destination tensor. Supported data type: S32
1074 * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
1075 * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
1076 * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
1077 * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
1078 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
1079 */
1080__kernel void gemmlowp_matrix_a_reduction_dot8(TENSOR3D_DECLARATION(src),
1081 IMAGE_DECLARATION(dst))
1082{
1083 // Compute source and destination addresses
1084 Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
1085 Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
1086
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001087 ACC_DATA_TYPE sum_row = 0;
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001088
Manuel Bottini959c26d2019-12-02 16:22:35 +00001089 __global const DATA_TYPE *matrix_a = (__global const DATA_TYPE *)(src.ptr + get_global_id(0) * src_stride_y + get_global_id(1) * src_stride_z);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001090
1091 int i = 0;
1092
1093 // This for loop performs 16 accumulations
1094 for(; i <= ((int)COLS_A - 32); i += 32)
1095 {
Manuel Bottini959c26d2019-12-02 16:22:35 +00001096 VEC_DATA_TYPE(DATA_TYPE, 16)
1097 a0 = vload16(0, matrix_a + i);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001098
Viet-Hoa Do82169b32022-05-26 16:50:21 +01001099 DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.s0123, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row);
1100 DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.s4567, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row);
1101 DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.s89AB, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row);
1102 DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.sCDEF, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001103
Manuel Bottini959c26d2019-12-02 16:22:35 +00001104 a0 = vload16(1, matrix_a + i);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001105
Viet-Hoa Do82169b32022-05-26 16:50:21 +01001106 DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.s0123, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row);
1107 DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.s4567, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row);
1108 DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.s89AB, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row);
1109 DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.sCDEF, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001110 }
1111
1112 // This for loop performs the leftover accumulations
1113 for(; i < COLS_A; ++i)
1114 {
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001115 sum_row += (ACC_DATA_TYPE)matrix_a[i];
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001116 }
1117
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001118#if defined(SCALAR)
1119 sum_row *= (int)SCALAR;
1120#endif // defined(SCALAR)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001121 *((__global int *)dst.ptr) = (int)sum_row;
1122}
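/* How the reduction above maps onto the dot product instruction (illustrative): each
 * DOT_PRODUCT4_INTEGER8 call multiplies four loaded values by the constant vector (1, 1, 1, 1)
 * and accumulates the result, i.e. dot((a0, a1, a2, a3), (1, 1, 1, 1)) = a0 + a1 + a2 + a3.
 * Eight such calls per iteration accumulate 32 elements of the row, which is why the main loop
 * advances i by 32.
 */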
1123#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
Gian Marco05288a22017-11-21 10:57:50 +00001124#endif // defined(COLS_A)
1125
Michele Di Giorgioaae34102020-10-19 15:31:45 +01001126#if defined(COLS_B) && defined(ROWS_B) && defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER)
Gian Marco05288a22017-11-21 10:57:50 +00001127/** OpenCL kernel used to compute the row-vectors of sums of all the entries in each column of Matrix B.
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001128 * It is also possible to multiply each reduced column by a scalar value, if SCALAR is passed at compile time.
Gian Marco05288a22017-11-21 10:57:50 +00001129 *
1130 * @note This stage is needed to handle the offset of matrix product
1131 * https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
1132 *
1133 * @attention The number of matrix B columns and rows needs to be passed at compile time using -DCOLS_B and -DROWS_B
Manuel Bottini959c26d2019-12-02 16:22:35 +00001134 * @note The input data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=uchar)
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001135 * @note The data type for the accumulation must be passed at compile time using -DACC_DATA_TYPE (i.e. -DACC_DATA_TYPE=uint)
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001136 * @note In case of scaling the scalar value must be passed at compile time using -DSCALAR (i.e. -DSCALAR=3)
Michele Di Giorgioaae34102020-10-19 15:31:45 +01001137 * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
1138 * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
Gian Marco05288a22017-11-21 10:57:50 +00001139 *
Michele Di Giorgiof6f78762020-07-06 11:27:21 +01001140 * @param[in] src_ptr Pointer to the source tensor. Supported data type: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL
Gian Marco05288a22017-11-21 10:57:50 +00001141 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
1142 * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
1143 * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
1144 * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
1145 * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
1146 * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
1147 * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
1148 * @param[out] dst_ptr                               Pointer to the destination tensor. Supported data type: S32
1149 * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
1150 * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
1151 * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
1152 * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
1153 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
1154 */
1155__kernel void gemmlowp_matrix_b_reduction(TENSOR3D_DECLARATION(src),
1156 IMAGE_DECLARATION(dst))
1157{
1158 // Compute source and destination addresses
Michele Di Giorgioaae34102020-10-19 15:31:45 +01001159 const uint x_offs = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
1160 const uint y = get_global_id(1);
Gian Marco05288a22017-11-21 10:57:50 +00001161
Michele Di Giorgioaae34102020-10-19 15:31:45 +01001162 __global const DATA_TYPE *matrix_b = (__global const DATA_TYPE *)(src_ptr + src_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) + y * src_step_y + y * src_stride_z);
1163 __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offs * sizeof(int) + y * dst_stride_y;
Gian Marco05288a22017-11-21 10:57:50 +00001164
Michele Di Giorgioaae34102020-10-19 15:31:45 +01001165 VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)
Michele Di Giorgioed902bc2020-10-22 12:05:09 +01001166 sum_col_32 = (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))0;
Gian Marco05288a22017-11-21 10:57:50 +00001167
1168 int i = 0;
1169 // This for loop performs 4 accumulations
1170 for(; i <= ((int)ROWS_B - 4); i += 4)
1171 {
Michele Di Giorgioaae34102020-10-19 15:31:45 +01001172 const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
1173 b0 = VLOAD(VEC_SIZE)(0, matrix_b + 0 * src_stride_y);
1174 const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
1175 b1 = VLOAD(VEC_SIZE)(0, matrix_b + 1 * src_stride_y);
1176 const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
1177 b2 = VLOAD(VEC_SIZE)(0, matrix_b + 2 * src_stride_y);
1178 const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
1179 b3 = VLOAD(VEC_SIZE)(0, matrix_b + 3 * src_stride_y);
Gian Marco05288a22017-11-21 10:57:50 +00001180
Michele Di Giorgioed902bc2020-10-22 12:05:09 +01001181 sum_col_32 += CONVERT(b0, VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)) + CONVERT(b1, VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)) + CONVERT(b2, VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)) + CONVERT(b3,
1182 VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE));
Gian Marco05288a22017-11-21 10:57:50 +00001183
1184 matrix_b += 4 * src_stride_y;
1185 }
1186
1187    // This for loop performs the leftover accumulations
1188 for(; i < (int)ROWS_B; ++i)
1189 {
Michele Di Giorgioaae34102020-10-19 15:31:45 +01001190 const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
1191 b0 = VLOAD(VEC_SIZE)(0, matrix_b);
Gian Marco05288a22017-11-21 10:57:50 +00001192
Michele Di Giorgioed902bc2020-10-22 12:05:09 +01001193 sum_col_32 += CONVERT(b0, VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE));
Gian Marco05288a22017-11-21 10:57:50 +00001194
1195 matrix_b += src_stride_y;
1196 }
1197
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001198#if defined(SCALAR)
Michele Di Giorgioed902bc2020-10-22 12:05:09 +01001199 sum_col_32 *= (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))SCALAR;
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001200#endif // defined(SCALAR)
Michele Di Giorgioed902bc2020-10-22 12:05:09 +01001201 VEC_DATA_TYPE(int, VEC_SIZE)
1202 res0 = CONVERT(sum_col_32, VEC_DATA_TYPE(int, VEC_SIZE));
1203
1204 STORE_VECTOR_SELECT(res, int, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
Gian Marco05288a22017-11-21 10:57:50 +00001205}
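/* Illustrative example of the leftover handling above (the sizes are assumed, not mandated by the
 * kernel): with 35 columns and -DVEC_SIZE=16, -DVEC_SIZE_LEFTOVER is 3 (35 % 16). The x_offs
 * expression shifts every work item except the first back by (VEC_SIZE - VEC_SIZE_LEFTOVER) = 13
 * elements, so work item 0 reduces columns 0..15 but stores only the first 3 sums
 * (STORE_VECTOR_SELECT with get_global_id(0) == 0), while work items 1 and 2 store full 16-wide
 * vectors covering columns 3..18 and 19..34.
 */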
Michele Di Giorgioaae34102020-10-19 15:31:45 +01001206#endif // defined(COLS_B) && defined(ROWS_B) && defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER)
Gian Marco05288a22017-11-21 10:57:50 +00001207
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001208#endif // defined(DATA_TYPE) && defined(ACC_DATA_TYPE)
1209
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001210#if defined(K_OFFSET) && defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER)
1211
1212#define VEC_INT VEC_DATA_TYPE(int, VEC_SIZE)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001213
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001214/* Helper function used to calculate the offset contribution after matrix multiplication.
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001215 *
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001216 * This kernel takes a final int32 accumulator value (the output of matrix multiplication),
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001217 * and calculates the offset contribution of matrix A and matrix B.
1218 *
1219 * @attention The k_offset = a_offset * b_offset * k (where k is the number of matrix A columns) needs to be passed at compile time using -DK_OFFSET (i.e. -DK_OFFSET=1200)
1220 * @note In case the offset contribution due to a_offset is required, a_offset needs to be passed at compile time using -DA_OFFSET (i.e. -DA_OFFSET=1)
1221 * @note In case the offset contribution due to b_offset is required, b_offset needs to be passed at compile time using -DB_OFFSET (i.e. -DB_OFFSET=6)
1222 * @note In case sum_col has batches, -DSUM_COL_HAS_BATCHES must be passed at compile time. Usually if gemmlowp is used to accelerate convolution layer, sum_col will not have batches
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001223 * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
1224 * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001225 *
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001226 * @param[in] x max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001227 * @param[in] y get_global_id(1)
1228 * @param[in] z get_global_id(2)
1229 * @param[in] sum_col_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1230 * @param[in] sum_col_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1231 * @param[in] sum_col_step_x (Optional) sum_col_stride_x * number of elements along X processed per workitem(in bytes)
1232 * @param[in] sum_col_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1233 * @param[in] sum_col_step_y (Optional) sum_col_stride_y * number of elements along Y processed per workitem(in bytes)
1234 * @param[in] sum_col_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
1235 * @param[in] sum_row_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1236 * @param[in] sum_row_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1237 * @param[in] sum_row_step_x (Optional) sum_row_stride_x * number of elements along X processed per workitem(in bytes)
1238 * @param[in] sum_row_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1239 * @param[in] sum_row_step_y (Optional) sum_row_stride_y * number of elements along Y processed per workitem(in bytes)
1240 * @param[in] sum_row_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
1241 * @param[in] biases_ptr (Optional) Pointer to the biases tensor. Supported data type: same as @p src_ptr
1242 * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
1243 * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
1244 * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
1245 */
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001246inline VEC_INT offset_contribution(
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001247 int x,
1248 int y,
1249 int z
1250#if defined(A_OFFSET)
1251 ,
1252 IMAGE_DECLARATION(sum_col)
1253#endif // defined(A_OFFSET)
1254#if defined(B_OFFSET)
1255 ,
1256 IMAGE_DECLARATION(sum_row)
1257#endif // defined(B_OFFSET)
1258#if defined(ADD_BIAS)
1259 ,
1260 VECTOR_DECLARATION(biases)
1261#endif // defined(ADD_BIAS)
1262)
1263{
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001264 VEC_INT a_offset_s32 = (VEC_INT)0;
1265 VEC_INT b_offset_s32 = (VEC_INT)0;
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001266
1267 int batch_id = z;
1268#if defined(DEPTH_INPUT3D)
1269 batch_id /= (int)DEPTH_INPUT3D;
1270#endif // defined(DEPTH_INPUT3D)
1271
1272#if defined(A_OFFSET)
1273 // Compute the offset contribution due to A_OFFSET
1274 __global uchar *sum_col_addr = sum_col_ptr + sum_col_offset_first_element_in_bytes + x * sizeof(int);
1275
1276 // Compute the offset contribution due to A_OFFSET
1277#if defined(SUM_COL_HAS_BATCHES)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001278 a_offset_s32 = VLOAD(VEC_SIZE)(0, (__global int *)(sum_col_addr + batch_id * sum_col_stride_y));
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001279#else // defined(SUM_COL_HAS_BATCHES)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001280 a_offset_s32 = VLOAD(VEC_SIZE)(0, (__global int *)sum_col_addr);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001281#endif // defined(SUM_COL_HAS_BATCHES)
1282
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001283 a_offset_s32 *= (VEC_INT)A_OFFSET;
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001284#endif // defined(A_OFFSET)
1285
1286#if defined(B_OFFSET)
1287    // Compute the offset contribution due to B_OFFSET
1288 __global uchar *sum_row_addr = sum_row_ptr + sum_row_offset_first_element_in_bytes + y * sizeof(int);
1289
1290 // Compute the offset contribution due to B_OFFSET
1291#if defined(HEIGHT_INPUT3D) && defined(DEPTH_INPUT3D)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001292 b_offset_s32 = (VEC_INT) * (((__global int *)(sum_row_addr + batch_id * sum_row_stride_y)) + (z % (int)DEPTH_INPUT3D) * (int)HEIGHT_INPUT3D);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001293#else // defined(HEIGHT_INPUT3D) && defined(DEPTH_INPUT3D)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001294 b_offset_s32 = (VEC_INT) * (((__global int *)(sum_row_addr + batch_id * sum_row_stride_y)));
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001295#endif // defined(HEIGHT_INPUT3D) && defined(DEPTH_INPUT3D)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001296 b_offset_s32 *= (VEC_INT)B_OFFSET;
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001297#endif // defined(B_OFFSET)
1298
1299#if defined(ADD_BIAS)
1300 // Add bias
1301 __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(int);
1302
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001303 VEC_INT biases_values = VLOAD(VEC_SIZE)(0, (__global int *)bias_addr);
1304 b_offset_s32 += (VEC_INT)biases_values;
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001305#endif // defined(ADD_BIAS)
1306
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001307 return (VEC_INT)K_OFFSET + a_offset_s32 + b_offset_s32;
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001308}
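/* Worked example of the returned offset term (numbers taken from the documentation examples above,
 * i.e. -DA_OFFSET=1, -DB_OFFSET=6 and k = 200 matrix A columns, hence -DK_OFFSET=1*6*200=1200):
 * for a work item at position (x, y), ignoring the optional bias, the function returns
 * 1200 + 1 * sum_col[x .. x + VEC_SIZE - 1] + 6 * sum_row[y], which is the per-element correction
 * that the kernels below add to the raw int32 GEMM accumulators.
 */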
1309
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001310/* OpenCL kernel used to add the offset contribution after matrix multiplication. The computation is performed in-place
Gian Marco05288a22017-11-21 10:57:50 +00001311 *
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001312 * This kernel takes a final int32 accumulator value (the output of matrix multiplication),
Gian Marco05288a22017-11-21 10:57:50 +00001313 * and adds to it the offset contribution of matrix A and matrix B in-place.
1314 *
1315 * @attention The k_offset = a_offset * b_offset * k (where k is the number of matrix A columns) needs to be passed at compile time using -DK_OFFSET (i.e. -DK_OFFSET=1200)
1316 * @note In case the offset contribution due to a_offset is required, a_offset needs to be passed at compile time using -DA_OFFSET (i.e. -DA_OFFSET=1)
1317 * @note In case the offset contribution due to b_offset is required, b_offset needs to be passed at compile time using -DB_OFFSET (i.e. -DB_OFFSET=6)
Chunosov5124be52017-11-22 20:42:13 +07001318 * @note In case sum_col has batches, -DSUM_COL_HAS_BATCHES must be passed at compile time. Usually if gemmlowp is used to accelerate convolution layer, sum_col will not have batches
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001319 * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
1320 * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
Gian Marco05288a22017-11-21 10:57:50 +00001321 *
1322 * The final result is:
1323 *
1324 * mm_result[i][k] = mm_result[i][k] +
1325 * (sum_col[k] * A_OFFSET) +
1326 * (sum_row[i] * B_OFFSET) +
1327 * (K_OFFSET)
1328 *
Georgios Pinitasebf6b8a2018-09-24 16:31:08 +01001329 * @param[in] mm_result_ptr Pointer to the source tensor. Supported data type: S32
1330 * @param[in] mm_result_stride_x Stride of the source tensor in X dimension (in bytes)
1331 * @param[in] mm_result_step_x mm_result_stride_x * number of elements along X processed per workitem(in bytes)
1332 * @param[in] mm_result_stride_y Stride of the source tensor in Y dimension (in bytes)
1333 * @param[in] mm_result_step_y mm_result_stride_y * number of elements along Y processed per workitem(in bytes)
1334 * @param[in] mm_result_stride_z Stride of the source tensor in Z dimension (in bytes)
1335 * @param[in] mm_result_step_z mm_result_stride_z * number of elements along Z processed per workitem(in bytes)
1336 * @param[in] mm_result_offset_first_element_in_bytes The offset of the first element in the source tensor
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001337 * @param[in] sum_col_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1338 * @param[in] sum_col_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1339 * @param[in] sum_col_step_x (Optional) sum_col_stride_x * number of elements along X processed per workitem(in bytes)
1340 * @param[in] sum_col_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1341 * @param[in] sum_col_step_y (Optional) sum_col_stride_y * number of elements along Y processed per workitem(in bytes)
1342 * @param[in] sum_col_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
1343 * @param[in] sum_row_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1344 * @param[in] sum_row_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1345 * @param[in] sum_row_step_x (Optional) sum_row_stride_x * number of elements along X processed per workitem(in bytes)
1346 * @param[in] sum_row_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1347 * @param[in] sum_row_step_y (Optional) sum_row_stride_y * number of elements along Y processed per workitem(in bytes)
1348 * @param[in] sum_row_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
1349 * @param[in] biases_ptr (Optional) Pointer to the biases tensor. Supported data type: same as @p src_ptr
1350 * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
1351 * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
1352 * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
Gian Marco05288a22017-11-21 10:57:50 +00001353 */
1354__kernel void gemmlowp_offset_contribution(TENSOR3D_DECLARATION(mm_result)
1355#if defined(A_OFFSET)
1356 ,
1357 IMAGE_DECLARATION(sum_col)
1358#endif // defined(A_OFFSET)
1359#if defined(B_OFFSET)
1360 ,
1361 IMAGE_DECLARATION(sum_row)
1362#endif // defined(B_OFFSET)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001363#if defined(ADD_BIAS)
1364 ,
1365 VECTOR_DECLARATION(biases)
1366#endif // defined(ADD_BIAS)
Gian Marco05288a22017-11-21 10:57:50 +00001367 )
1368{
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001369 const int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
Georgios Pinitasebf6b8a2018-09-24 16:31:08 +01001370 const int y = get_global_id(1);
1371 const int z = get_global_id(2);
1372
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001373 // Compute offset contribution
Michele Di Giorgio410bca42020-10-22 11:07:33 +01001374 VEC_INT offset_term_s32 = offset_contribution(
1375 x, y, z
Gian Marco05288a22017-11-21 10:57:50 +00001376#if defined(A_OFFSET)
Michele Di Giorgio410bca42020-10-22 11:07:33 +01001377 ,
1378 sum_col_ptr,
1379 sum_col_stride_x,
1380 sum_col_step_x,
1381 sum_col_stride_y,
1382 sum_col_step_y,
1383 sum_col_offset_first_element_in_bytes
Gian Marco05288a22017-11-21 10:57:50 +00001384#endif // defined(A_OFFSET)
Gian Marco05288a22017-11-21 10:57:50 +00001385#if defined(B_OFFSET)
Michele Di Giorgio410bca42020-10-22 11:07:33 +01001386 ,
1387 sum_row_ptr,
1388 sum_row_stride_x,
1389 sum_row_step_x,
1390 sum_row_stride_y,
1391 sum_row_step_y,
1392 sum_row_offset_first_element_in_bytes
Gian Marco05288a22017-11-21 10:57:50 +00001393#endif // defined(B_OFFSET)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001394#if defined(ADD_BIAS)
Michele Di Giorgio410bca42020-10-22 11:07:33 +01001395 ,
1396 biases_ptr,
1397 biases_stride_x,
1398 biases_step_x,
1399 biases_offset_first_element_in_bytes
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001400#endif // defined(ADD_BIAS)
Michele Di Giorgio410bca42020-10-22 11:07:33 +01001401 );
Gian Marco05288a22017-11-21 10:57:50 +00001402
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001403 __global uchar *mm_result_addr = mm_result_ptr + mm_result_offset_first_element_in_bytes + x * sizeof(int) + y * mm_result_stride_y + z * mm_result_stride_z;
Gian Marco05288a22017-11-21 10:57:50 +00001404
Michele Di Giorgio410bca42020-10-22 11:07:33 +01001405 VEC_INT in_s32_0 = VLOAD(VEC_SIZE)(0, (__global int *)mm_result_addr);
Gian Marco05288a22017-11-21 10:57:50 +00001406
1407 // Add the offset terms to GEMM's result
Michele Di Giorgio410bca42020-10-22 11:07:33 +01001408 in_s32_0 += offset_term_s32;
Gian Marco05288a22017-11-21 10:57:50 +00001409
1410 // Store the result with the offset contribution
Michele Di Giorgio410bca42020-10-22 11:07:33 +01001411 STORE_VECTOR_SELECT(in_s32_, int, mm_result_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
Gian Marco05288a22017-11-21 10:57:50 +00001412}
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001413
Michele Di Giorgiob54ba282020-01-14 15:31:55 +00001414#if defined(RESULT_OFFSET) && defined(RESULT_MULTIPLIER) && defined(RESULT_SHIFT) && defined(OUTPUT_DATA_TYPE)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001415/* OpenCL kernel used to add the offset contribution after @ref CLGEMMLowpMatrixMultiplyKernel and quantize the result down to uint8/int8.
1416 *
1417 * This kernel takes a final int32 accumulator value (the output of @ref CLGEMMLowpMatrixMultiplyKernel), adds to it the offset contribution of matrix A and matrix B, and quantizes the result to uint8/int8 through the output stage.
1418 *
1419 *
1420 * @attention The k_offset = a_offset * b_offset * k (where k is the number of matrix A columns) needs to be passed at compile time using -DK_OFFSET (i.e. -DK_OFFSET=1200)
1421 * @note In case the offset contribution due to a_offset is required, a_offset needs to be passed at compile time using -DA_OFFSET (i.e. -DA_OFFSET=1)
1422 * @note In case the offset contribution due to b_offset is required, b_offset needs to be passed at compile time using -DB_OFFSET (i.e. -DB_OFFSET=6)
1423 * @note In case sum_col has batches, -DSUM_COL_HAS_BATCHES must be passed at compile time. Usually if gemmlowp is used to accelerate convolution layer, sum_col will not have batches
1424 *
1425 * The result before the output stage is:
1426 *
1427 * mm_result[i][k] = mm_result[i][k] +
1428 * (sum_col[k] * A_OFFSET) +
1429 * (sum_row[i] * B_OFFSET) +
1430 * (K_OFFSET)
1431 *
Manuel Bottini959c26d2019-12-02 16:22:35 +00001432 * This result is quantized down to uint8/int8 using the output stage. The output stage computes the following operations:
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001433 *
1434 * -# Add bias to final result (if -DADD_BIAS is passed at compile time)
1435 * -# Add offset terms to final result
1436 * -# Multiply each entry of result by result_mult_int
1437 * -# Shift the int32 accumulator by result_shift
1438 * -# Clamp the value between the specified min and max bounds (if -DMIN_BOUND and/or -DMAX_BOUND are passed at compile time)
Manuel Bottini959c26d2019-12-02 16:22:35 +00001439 * -# Clamp the resulting int32 values:
1440 * - to the [0..255] range and cast to QASYMM8.
1441 * - to the [-128..127] range and cast to QASYMM8_SIGNED.
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001442 *
1443 * @attention The offset, scalar scale factor and number of bits to shift right of output tensor must be passed at compile time using -DRESULT_OFFSET, -DRESULT_MULTIPLIER and -DRESULT_SHIFT
1444 *
1445 * @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
Manuel Bottini959c26d2019-12-02 16:22:35 +00001446 * @note The output datatype should be passed at compile time using -DOUTPUT_DATA_TYPE
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001447 * @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
1448 * These values can be used to implement "rectified linear unit" activation functions
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001449 * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
1450 * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001451 *
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001452 * @param[in] mm_result_ptr Pointer to the source tensor. Supported data type: S32
1453 * @param[in] mm_result_stride_x Stride of the source tensor in X dimension (in bytes)
1454 * @param[in] mm_result_step_x mm_result_stride_x * number of elements along X processed per workitem(in bytes)
1455 * @param[in] mm_result_stride_y Stride of the source tensor in Y dimension (in bytes)
1456 * @param[in] mm_result_step_y mm_result_stride_y * number of elements along Y processed per workitem(in bytes)
1457 * @param[in] mm_result_stride_z Stride of the source tensor in Z dimension (in bytes)
1458 * @param[in] mm_result_step_z mm_result_stride_z * number of elements along Z processed per workitem(in bytes)
1459 * @param[in] mm_result_offset_first_element_in_bytes The offset of the first element in the source tensor
1460 * @param[in] sum_col_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1461 * @param[in] sum_col_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1462 * @param[in] sum_col_step_x (Optional) sum_col_stride_x * number of elements along X processed per workitem(in bytes)
1463 * @param[in] sum_col_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1464 * @param[in] sum_col_step_y (Optional) sum_col_stride_y * number of elements along Y processed per workitem(in bytes)
1465 * @param[in] sum_col_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
1466 * @param[in] sum_row_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1467 * @param[in] sum_row_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1468 * @param[in] sum_row_step_x (Optional) sum_row_stride_x * number of elements along X processed per workitem(in bytes)
1469 * @param[in] sum_row_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1470 * @param[in] sum_row_step_y (Optional) sum_row_stride_y * number of elements along Y processed per workitem(in bytes)
1471 * @param[in] sum_row_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
1472 * @param[in] biases_ptr (Optional) Pointer to the biases tensor. Supported data type: same as @p src_ptr
1473 * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
1474 * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
1475 * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
Manuel Bottini959c26d2019-12-02 16:22:35 +00001476 * @param[out] dst_ptr                                                       Pointer to the destination tensor. Supported data type: QASYMM8/QASYMM8_SIGNED
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001477 * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
1478 * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
1479 * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
1480 * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
1481 * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
1482 * @param[in] dst_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
1483 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
1484 * @param[in] result_multipliers_ptr (Optional) Pointer to the output multipliers vector for per-channel quantization. Supported data types: S32
1485 * @param[in] result_multipliers_stride_x (Optional) Stride of the output multipliers vector in X dimension (in bytes)
1486 * @param[in] result_multipliers_step_x (Optional) output_multipliers_stride_x * number of elements along X processed per workitem(in bytes)
1487 * @param[in] result_multipliers_offset_first_element_in_bytes (Optional) The offset of the first element in the output multipliers vector
1488 * @param[in] result_shifts_ptr (Optional) Pointer to the output shifts vector for per-channel quantization. Supported data types: S32
1489 * @param[in] result_shifts_stride_x (Optional) Stride of the output shifts vector in X dimension (in bytes)
1490 * @param[in] result_shifts_step_x (Optional) output_shifts_stride_x * number of elements along X processed per workitem(in bytes)
1491 * @param[in] result_shifts_offset_first_element_in_bytes (Optional) The offset of the first element in the output shifts vector
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001492 */
1493__kernel void gemmlowp_offset_contribution_quantize_down(TENSOR3D_DECLARATION(mm_result)
1494#if defined(A_OFFSET)
1495 ,
1496 IMAGE_DECLARATION(sum_col)
1497#endif // defined(A_OFFSET)
1498#if defined(B_OFFSET)
1499 ,
1500 IMAGE_DECLARATION(sum_row)
1501#endif // defined(B_OFFSET)
1502 ,
1503#if defined(ADD_BIAS)
1504 VECTOR_DECLARATION(biases),
1505#endif // defined(ADD_BIAS)
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001506 TENSOR3D_DECLARATION(dst)
1507#if defined(PER_CHANNEL_QUANTIZATION)
1508 ,
1509 VECTOR_DECLARATION(result_multipliers),
1510 VECTOR_DECLARATION(result_shifts)
1511#endif // defined(PER_CHANNEL_QUANTIZATION)
1512 )
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001513{
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001514 const int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001515 const int y = get_global_id(1);
1516 const int z = get_global_id(2);
1517
1518 __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x + y * dst_stride_y + z * dst_stride_z;
1519
1520 // Compute offset contribution
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001521 VEC_INT offset_term_s32 = offset_contribution(
1522 x, y, z
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001523#if defined(A_OFFSET)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001524 ,
1525 sum_col_ptr,
1526 sum_col_stride_x,
1527 sum_col_step_x,
1528 sum_col_stride_y,
1529 sum_col_step_y,
1530 sum_col_offset_first_element_in_bytes
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001531#endif // defined(A_OFFSET)
1532#if defined(B_OFFSET)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001533 ,
1534 sum_row_ptr,
1535 sum_row_stride_x,
1536 sum_row_step_x,
1537 sum_row_stride_y,
1538 sum_row_step_y,
1539 sum_row_offset_first_element_in_bytes
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001540#endif // defined(B_OFFSET)
1541#if defined(ADD_BIAS)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001542 ,
1543 biases_ptr,
1544 biases_stride_x,
1545 biases_step_x,
1546 biases_offset_first_element_in_bytes
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001547#endif // defined(ADD_BIAS)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001548 );
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001549
1550 __global uchar *mm_result_addr = mm_result_ptr + mm_result_offset_first_element_in_bytes + x * sizeof(int) + y * mm_result_stride_y + z * mm_result_stride_z;
1551
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001552 VEC_INT in_s32 = VLOAD(VEC_SIZE)(0, (__global int *)mm_result_addr);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001553
1554 // Add the offset terms to GEMM's result
1555 in_s32 += offset_term_s32;
1556
1557 // -------------- OUTPUT STAGE
1558
1559 // Add the offset terms to GEMM's result
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001560 in_s32 += (VEC_INT)RESULT_OFFSET;
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001561
1562 // Multiply by result_mult_int and shift
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001563#if defined(PER_CHANNEL_QUANTIZATION)
1564 __global uchar *result_multipliers_addr = result_multipliers_ptr + result_multipliers_offset_first_element_in_bytes + x * sizeof(int);
1565 __global uchar *result_shifts_addr = result_shifts_ptr + result_shifts_offset_first_element_in_bytes + x * sizeof(int);
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001566 VEC_INT result_multipliers_values = VLOAD(VEC_SIZE)(0, (__global int *)result_multipliers_addr);
1567 VEC_INT result_shifts_values = VLOAD(VEC_SIZE)(0, (__global int *)result_shifts_addr);
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001568
1569 in_s32 *= result_multipliers_values;
1570 in_s32 >>= result_shifts_values;
1571#else // defined(PER_CHANNEL_QUANTIZATION)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001572 in_s32 *= RESULT_MULTIPLIER;
1573
1574 in_s32 >>= RESULT_SHIFT;
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001575#endif // defined(PER_CHANNEL_QUANTIZATION)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001576
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001577 VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE)
1578 res0 = CONVERT_SAT(in_s32, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE));
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001579
1580#if defined(MIN_BOUND)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001581 res0 = max(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MIN_BOUND);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001582#endif // defined(MIN_BOUND)
1583#if defined(MAX_BOUND)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001584 res0 = min(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MAX_BOUND);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001585#endif // defined(MAX_BOUND)
1586
1587 // Store the result
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001588 STORE_VECTOR_SELECT(res, OUTPUT_DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001589}
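/* Worked example of this output stage (all constants are illustrative): with -DRESULT_OFFSET=5,
 * -DRESULT_MULTIPLIER=3 and -DRESULT_SHIFT=4, an accumulator value of 131 (already including the
 * offset/bias contribution) becomes (131 + 5) * 3 = 408, then 408 >> 4 = 25, which is finally
 * saturated to the OUTPUT_DATA_TYPE range (and clamped to [MIN_BOUND, MAX_BOUND] if those are
 * defined).
 */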
1590
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001591/* OpenCL kernel used to add the offset contribution after matrix multiplication and quantize the result down to uint8/int8.
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001592 *
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001593 * This kernel takes a final int32 accumulator value (the output of matrix multiplication), adds to it the offset contribution of matrix A and matrix B, and quantizes the result to uint8/int8 through the output stage.
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001594 *
1595 *
1596 * @attention The k_offset = a_offset * b_offset * k (where k is the number of matrix A columns) needs to be passed at compile time using -DK_OFFSET (i.e. -DK_OFFSET=1200)
1597 * @note In case the offset contribution due to a_offset is required, a_offset needs to be passed at compile time using -DA_OFFSET (i.e. -DA_OFFSET=1)
1598 * @note In case the offset contribution due to b_offset is required, b_offset needs to be passed at compile time using -DB_OFFSET (i.e. -DB_OFFSET=6)
1599 * @note In case sum_col has batches, -DSUM_COL_HAS_BATCHES must be passed at compile time. Usually if gemmlowp is used to accelerate convolution layer, sum_col will not have batches
1600 *
1601 * The result before the output stage is:
1602 *
1603 * mm_result[i][k] = mm_result[i][k] +
1604 * (sum_col[k] * A_OFFSET) +
1605 * (sum_row[i] * B_OFFSET) +
1606 * (K_OFFSET)
1607 *
Manuel Bottini959c26d2019-12-02 16:22:35 +00001608 * This result is quantized down to uint8/int8 using the output stage. The output stage computes the following operations:
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001609 *
1610 * -# Add bias to final result if bias tensor is not a nullptr
1611 * -# Compute the fixed point multiplication of each entry of the input with result_fixedpoint_multiplier
1612 * -# Round to nearest division by a power-of-two using result_shift
1613 * -# Add offset to each result
1614 * -# Clamp the value between the specified min and max bounds
Manuel Bottini959c26d2019-12-02 16:22:35 +00001615 * -# Clamp the resulting int32 values:
1616 * - to the [0..255] range and cast to QASYMM8.
1617 * - to the [-128..127] range and cast to QASYMM8_SIGNED.
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001618 *
1619 * @attention The offset, scalar scale factor and number of bits to shift right of output tensor must be passed at compile time using -DRESULT_OFFSET, -DRESULT_MULTIPLIER and -DRESULT_SHIFT
1620 *
1621 * @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
Manuel Bottini959c26d2019-12-02 16:22:35 +00001622 * @note The output datatype should be passed at compile time using -DOUTPUT_DATA_TYPE
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001623 * @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
1624 * These values can be used to implement "rectified linear unit" activation functions
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001625 * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
1626 * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001627 *
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001628 * @param[in] mm_result_ptr Pointer to the source tensor. Supported data type: S32
1629 * @param[in] mm_result_stride_x Stride of the source tensor in X dimension (in bytes)
1630 * @param[in] mm_result_step_x mm_result_stride_x * number of elements along X processed per workitem(in bytes)
1631 * @param[in] mm_result_stride_y Stride of the source tensor in Y dimension (in bytes)
1632 * @param[in] mm_result_step_y mm_result_stride_y * number of elements along Y processed per workitem(in bytes)
1633 * @param[in] mm_result_stride_z Stride of the source tensor in Z dimension (in bytes)
1634 * @param[in] mm_result_step_z mm_result_stride_z * number of elements along Z processed per workitem(in bytes)
1635 * @param[in] mm_result_offset_first_element_in_bytes The offset of the first element in the source tensor
1636 * @param[in] sum_col_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1637 * @param[in] sum_col_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1638 * @param[in] sum_col_step_x (Optional) sum_col_stride_x * number of elements along X processed per workitem(in bytes)
1639 * @param[in] sum_col_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1640 * @param[in] sum_col_step_y (Optional) sum_col_stride_y * number of elements along Y processed per workitem(in bytes)
1641 * @param[in] sum_col_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
1642 * @param[in] sum_row_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1643 * @param[in] sum_row_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1644 * @param[in] sum_row_step_x (Optional) sum_row_stride_x * number of elements along X processed per workitem(in bytes)
1645 * @param[in] sum_row_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1646 * @param[in] sum_row_step_y (Optional) sum_row_stride_y * number of elements along Y processed per workitem(in bytes)
1647 * @param[in] sum_row_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
1648 * @param[in] biases_ptr (Optional) Pointer to the biases tensor. Supported data type: same as @p src_ptr
1649 * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
1650 * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
1651 * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001652 * @param[out] dst_ptr                                                       Pointer to the destination tensor. Supported data type: QASYMM8/QASYMM8_SIGNED
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001653 * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
1654 * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
1655 * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
1656 * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
1657 * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
1658 * @param[in] dst_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
1659 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
1660 * @param[in] result_multipliers_ptr (Optional) Pointer to the output multipliers vector for per-channel quantization. Supported data types: S32
1661 * @param[in] result_multipliers_stride_x (Optional) Stride of the output multipliers vector in X dimension (in bytes)
1662 * @param[in] result_multipliers_step_x (Optional) output_multipliers_stride_x * number of elements along X processed per workitem(in bytes)
1663 * @param[in] result_multipliers_offset_first_element_in_bytes (Optional) The offset of the first element in the output multipliers vector
1664 * @param[in] result_shifts_ptr (Optional) Pointer to the output shifts vector for per-channel quantization. Supported data types: S32
1665 * @param[in] result_shifts_stride_x (Optional) Stride of the output shifts vector in X dimension (in bytes)
1666 * @param[in] result_shifts_step_x (Optional) output_shifts_stride_x * number of elements along X processed per workitem(in bytes)
1667 * @param[in] result_shifts_offset_first_element_in_bytes (Optional) The offset of the first element in the output shifts vector
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001668 */
1669__kernel void gemmlowp_offset_contribution_quantize_down_fixedpoint(TENSOR3D_DECLARATION(mm_result)
1670#if defined(A_OFFSET)
1671 ,
1672 IMAGE_DECLARATION(sum_col)
1673#endif // defined(A_OFFSET)
1674#if defined(B_OFFSET)
1675 ,
1676 IMAGE_DECLARATION(sum_row)
1677#endif // defined(B_OFFSET)
1678 ,
1679#if defined(ADD_BIAS)
1680 VECTOR_DECLARATION(biases),
1681#endif // defined(ADD_BIAS)
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001682 TENSOR3D_DECLARATION(dst)
1683#if defined(PER_CHANNEL_QUANTIZATION)
1684 ,
1685 VECTOR_DECLARATION(result_multipliers),
1686 VECTOR_DECLARATION(result_shifts)
1687#endif // defined(PER_CHANNEL_QUANTIZATION)
1688 )
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001689{
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001690 const int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001691 const int y = get_global_id(1);
1692 const int z = get_global_id(2);
1693
1694 // Compute offset contribution
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001695 VEC_INT offset_term_s32 = offset_contribution(
1696 x, y, z
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001697#if defined(A_OFFSET)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001698 ,
1699 sum_col_ptr,
1700 sum_col_stride_x,
1701 sum_col_step_x,
1702 sum_col_stride_y,
1703 sum_col_step_y,
1704 sum_col_offset_first_element_in_bytes
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001705#endif // defined(A_OFFSET)
1706#if defined(B_OFFSET)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001707 ,
1708 sum_row_ptr,
1709 sum_row_stride_x,
1710 sum_row_step_x,
1711 sum_row_stride_y,
1712 sum_row_step_y,
1713 sum_row_offset_first_element_in_bytes
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001714#endif // defined(B_OFFSET)
1715#if defined(ADD_BIAS)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001716 ,
1717 biases_ptr,
1718 biases_stride_x,
1719 biases_step_x,
1720 biases_offset_first_element_in_bytes
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001721#endif // defined(ADD_BIAS)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001722 );
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001723
1724 __global uchar *mm_result_addr = mm_result_ptr + mm_result_offset_first_element_in_bytes + x * sizeof(int) + y * mm_result_stride_y + z * mm_result_stride_z;
1725
1726 __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x + y * dst_stride_y + z * dst_stride_z;
1727
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001728 VEC_INT in_s32 = VLOAD(VEC_SIZE)(0, (__global int *)mm_result_addr);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001729
1730 // Add the offset terms to GEMM's result
1731 in_s32 += offset_term_s32;
1732
1733 // -------------- OUTPUT STAGE
1734
1735 // Multiply by result_mult_int and shift
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001736#if defined(PER_CHANNEL_QUANTIZATION)
1737 __global uchar *result_multipliers_addr = result_multipliers_ptr + result_multipliers_offset_first_element_in_bytes + x * sizeof(int);
1738 __global uchar *result_shifts_addr = result_shifts_ptr + result_shifts_offset_first_element_in_bytes + x * sizeof(int);
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001739 VEC_INT result_multipliers_values = VLOAD(VEC_SIZE)(0, (__global int *)result_multipliers_addr);
1740 VEC_INT result_shifts_values = VLOAD(VEC_SIZE)(0, (__global int *)result_shifts_addr);
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001741
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001742 VEC_INT in_s32_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(in_s32, result_multipliers_values, result_shifts_values, VEC_SIZE);
1743 VEC_INT in_s32_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(in_s32, result_multipliers_values, result_shifts_values, VEC_SIZE);
1744 in_s32 = select(in_s32_shift_lt0, in_s32_shift_gt0, result_shifts_values >= 0);
Michele Di Giorgio14cbfb22019-10-23 10:53:10 +01001745#else // defined(PER_CHANNEL_QUANTIZATION)
1746
1747#if RESULT_SHIFT < 0
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001748 in_s32 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(in_s32, RESULT_MULTIPLIER, RESULT_SHIFT, VEC_SIZE);
Michele Di Giorgio14cbfb22019-10-23 10:53:10 +01001749#else // RESULT_SHIFT >= 0
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001750 in_s32 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(in_s32, RESULT_MULTIPLIER, RESULT_SHIFT, VEC_SIZE);
Michele Di Giorgio14cbfb22019-10-23 10:53:10 +01001751#endif // RESULT_SHIFT < 0
1752
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001753#endif // defined(PER_CHANNEL_QUANTIZATION)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001754
1755    // Add the output offset (RESULT_OFFSET)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001756 in_s32 += (VEC_INT)RESULT_OFFSET;
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001757
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001758 VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE)
1759 res0 = CONVERT_SAT(in_s32, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE));
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001760
1761#if defined(MIN_BOUND)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001762 res0 = max(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MIN_BOUND);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001763#endif // defined(MIN_BOUND)
1764#if defined(MAX_BOUND)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001765 res0 = min(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MAX_BOUND);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001766#endif // defined(MAX_BOUND)
1767
1768 // Store the result
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001769 STORE_VECTOR_SELECT(res, OUTPUT_DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001770}
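
// Illustrative only: a scalar sketch of the per-channel (or uniform) requantization dispatch used above.
// The helpers below are hypothetical and unused by the kernels in this file; they mirror, for a single
// accumulator value, the arithmetic that the ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE /
// _LESS_THAN_ONE macros and the select() on the sign of the per-channel shift are expected to perform.
inline int example_sat_rounding_doubling_high_mul(int a, int b)
{
    // Fixed-point multiply: round(a * b * 2^-31), saturating the single overflowing case
    long ab    = (long)a * (long)b;
    long nudge = ab >= 0 ? (1 << 30) : (1 - (1 << 30));
    return (a == b && a == INT_MIN) ? INT_MAX : (int)((ab + nudge) / (1L << 31));
}

inline int example_rounding_divide_by_pow2(int x, int exponent)
{
    // Divide by 2^exponent, rounding to the nearest integer (ties away from zero)
    int mask      = (1 << exponent) - 1;
    int remainder = x & mask;
    int threshold = (mask >> 1) + (x < 0 ? 1 : 0);
    return (x >> exponent) + (remainder > threshold ? 1 : 0);
}

inline int example_requantize_one_channel(int acc, int multiplier, int shift)
{
    if(shift < 0)
    {
        // Negative shift ("multiplier greater than one"): scale up by 2^(-shift) before the multiply
        return example_sat_rounding_doubling_high_mul(acc * (1 << (-shift)), multiplier);
    }
    // Non-negative shift ("multiplier less than one"): multiply, then rounding-divide by 2^shift
    return example_rounding_divide_by_pow2(example_sat_rounding_doubling_high_mul(acc, multiplier), shift);
}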
Michele Di Giorgiob54ba282020-01-14 15:31:55 +00001771#endif // defined(RESULT_OFFSET) && defined(RESULT_MULTIPLIER) && defined(RESULT_SHIFT) && defined(OUTPUT_DATA_TYPE)
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001772
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001773#undef VEC_INT
1774
1775#endif // defined(K_OFFSET) && defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER)
Gian Marco05288a22017-11-21 10:57:50 +00001776
1777#if defined(RESULT_OFFSET) && defined(RESULT_MULT_INT) && defined(RESULT_SHIFT)
Luca Foschiani689c9682020-02-26 14:30:14 +00001778/** This OpenCL kernel is used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED
Gian Marco05288a22017-11-21 10:57:50 +00001779 *
Luca Foschiani689c9682020-02-26 14:30:14 +00001780 * This kernel takes a final int32 accumulator value and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value.
Gian Marco05288a22017-11-21 10:57:50 +00001781 * The following computations will be performed by the kernel:
1782 *
1783 * -# Add bias to final result (if -DADD_BIAS is passed at compile time)
1784 * -# Add offset terms to final result
1785 * -# Multiply each entry of result by result_mult_int
1786 * -# Shift the int32 accumulator by result_shift
1787 * -# Clamp the value between the specified min and max bounds (if -DMIN_BOUND and/or -DMAX_BOUND are passed at compile time)
Luca Foschiani689c9682020-02-26 14:30:14 +00001788 * -# Clamp the resulting int32 values:
1789 *    - to the [0..255] range and cast to QASYMM8.
1790 *    - to the [-128..127] range and cast to QASYMM8_SIGNED.
Gian Marco05288a22017-11-21 10:57:50 +00001791 *
1792 * @attention The offset, scalar scale factor and number of bits to shift right of output tensor must be passed at compile time using -DRESULT_OFFSET, -DRESULT_MULT_INT and -DRESULT_SHIFT
1793 *
1794 * @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
Luca Foschiani689c9682020-02-26 14:30:14 +00001795 * @note The output datatype should be passed at compile time using -DOUTPUT_DATA_TYPE
Gian Marco05288a22017-11-21 10:57:50 +00001796 * @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
1797 * These values can be used to implement "rectified linear unit" activation functions
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001798 * @note The vector size must be passed at compile time using -DVEC_SIZE, e.g. -DVEC_SIZE=16, and the leftover vector size using -DVEC_SIZE_LEFTOVER, e.g. -DVEC_SIZE_LEFTOVER=3. The leftover is defined as the remainder between the input's first dimension and VEC_SIZE
Gian Marco05288a22017-11-21 10:57:50 +00001799 *
1800 * @param[in] src_ptr Pointer to the source tensor. Supported data type: S32
1801 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
1802 * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
1803 * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
1804 * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
1805 * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
1806 * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
1807 * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001808 * @param[in] biases_ptr (Optional) Pointer to the biases tensor. Supported data type: same as @p src_ptr
1809 * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
1810 * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
1811 * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
Luca Foschiani689c9682020-02-26 14:30:14 +00001812 * @param[out] dst_ptr                               Pointer to the destination tensor. Supported data type: QASYMM8/QASYMM8_SIGNED
Gian Marco05288a22017-11-21 10:57:50 +00001813 * @param[in]  dst_stride_x                          Stride of the destination tensor in X dimension (in bytes)
1814 * @param[in]  dst_step_x                            dst_stride_x * number of elements along X processed per workitem(in bytes)
1815 * @param[in]  dst_stride_y                          Stride of the destination tensor in Y dimension (in bytes)
1816 * @param[in]  dst_step_y                            dst_stride_y * number of elements along Y processed per workitem(in bytes)
1817 * @param[in]  dst_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
1818 * @param[in]  dst_step_z                            dst_stride_z * number of elements along Z processed per workitem(in bytes)
1819 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
1820 */
1821__kernel void gemmlowp_output_stage_quantize_down(TENSOR3D_DECLARATION(src),
1822#if defined(ADD_BIAS)
1823 VECTOR_DECLARATION(biases),
1824#endif // defined(ADD_BIAS)
1825 TENSOR3D_DECLARATION(dst))
1826{
1827 // Compute source and destination addresses
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001828 int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001829 int y = get_global_id(1);
1830 int z = get_global_id(2);
Gian Marco05288a22017-11-21 10:57:50 +00001831
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001832 __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(int) + y * src_stride_y + z * src_stride_z;
Gian Marco05288a22017-11-21 10:57:50 +00001833
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001834 __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x + y * dst_stride_y + z * dst_stride_z;
1835
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001836 VEC_DATA_TYPE(int, VEC_SIZE)
1837 input_values = VLOAD(VEC_SIZE)(0, (__global int *)src_addr);
Gian Marco58c57942017-11-28 09:10:03 +00001838
Gian Marco05288a22017-11-21 10:57:50 +00001839#if defined(ADD_BIAS)
1840 // Add bias
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001841 __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(int);
1842
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001843 VEC_DATA_TYPE(int, VEC_SIZE)
1844 biases_values = VLOAD(VEC_SIZE)(0, (__global int *)bias_addr);
1845 input_values += biases_values;
Gian Marco05288a22017-11-21 10:57:50 +00001846#endif // defined(ADD_BIAS)
1847
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001848    // Add the quantization offset (RESULT_OFFSET)
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001849 input_values += (VEC_DATA_TYPE(int, VEC_SIZE))RESULT_OFFSET;
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001850
Georgios Pinitas45bcc3a2017-11-29 11:06:49 +00001851 // Multiply by result_mult_int and shift
Gian Marco58c57942017-11-28 09:10:03 +00001852 input_values *= RESULT_MULT_INT;
Gian Marco05288a22017-11-21 10:57:50 +00001853
Michele Di Giorgio14cbfb22019-10-23 10:53:10 +01001854#if RESULT_SHIFT < 0
1855 input_values >>= -RESULT_SHIFT;
1856#else // RESULT_SHIFT >= 0
Gian Marco58c57942017-11-28 09:10:03 +00001857 input_values >>= RESULT_SHIFT;
Michele Di Giorgio14cbfb22019-10-23 10:53:10 +01001858#endif // RESULT_SHIFT < 0
Gian Marco05288a22017-11-21 10:57:50 +00001859
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001860 VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE)
1861 res0 = CONVERT_SAT(input_values, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE));
Gian Marco05288a22017-11-21 10:57:50 +00001862
1863#if defined(MIN_BOUND)
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001864 res0 = max(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MIN_BOUND);
Gian Marco05288a22017-11-21 10:57:50 +00001865#endif // defined(MIN_BOUND)
1866#if defined(MAX_BOUND)
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001867 res0 = min(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MAX_BOUND);
Gian Marco05288a22017-11-21 10:57:50 +00001868#endif // defined(MAX_BOUND)
1869
1870 // Store the result
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001871 STORE_VECTOR_SELECT(res, OUTPUT_DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
Gian Marco05288a22017-11-21 10:57:50 +00001872}
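
// Illustrative only: a scalar sketch of the quantize-down arithmetic used by the kernel above.
// The function below is hypothetical and unused; it restates, for a single accumulator value and a
// QASYMM8 output, the sequence bias add -> offset add -> integer multiply -> arithmetic shift.
// Note that the shift is a plain (truncating) arithmetic shift, unlike the rounding shift used by
// the fixed-point output stage kernels below.
inline uchar example_quantize_down_scalar(int acc, int bias, int result_offset, int result_mult_int, int result_shift)
{
    int v = acc + bias;          // optional bias (-DADD_BIAS)
    v += result_offset;          // -DRESULT_OFFSET
    v *= result_mult_int;        // -DRESULT_MULT_INT
    v >>= result_shift;          // -DRESULT_SHIFT (assuming a non-negative shift)
    return convert_uchar_sat(v); // saturate to [0..255]; MIN_BOUND/MAX_BOUND would clamp further
}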
Gian Marco58c57942017-11-28 09:10:03 +00001873#endif // defined(RESULT_OFFSET) && defined(RESULT_MULT_INT) && defined(RESULT_SHIFT)
1874
1875#if defined(RESULT_OFFSET_AFTER_SHIFT) && defined(RESULT_FIXEDPOINT_MULTIPLIER) && defined(RESULT_SHIFT)
Manuel Bottini959c26d2019-12-02 16:22:35 +00001876/** This OpenCL kernel is used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED
Gian Marco58c57942017-11-28 09:10:03 +00001877 *
Manuel Bottini959c26d2019-12-02 16:22:35 +00001878 * This kernel takes a final int32 accumulator value (the output of matrix multiplication), and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value.
Gian Marco58c57942017-11-28 09:10:03 +00001879 * The following computations will be performed by the kernel:
1880 *
1881 * -# Add bias to final result if bias tensor is not a nullptr
1882 * -# Compute fixed point multiplication between each entry of the input and result_fixedpoint_multiplier
1883 * -# Round to nearest division by a power-of-two using result_shift
1884 * -# Add offset to each result
1885 * -# Clamp the value between the specified min and max bounds
Manuel Bottini1f332d42019-11-29 17:25:25 +00001886 * -# Clamp the resulting int32 values:
1887 * - to the [0..255] range and cast to QASYMM8.
1888 * - to the [-128..127] range and cast to QASYMM8_SIGNED.
Gian Marco58c57942017-11-28 09:10:03 +00001889 *
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001890 * @attention The offset, scalar scale factor and number of bits to shift right of output tensor must be passed at compile time using -DRESULT_OFFSET_AFTER_SHIFT, -DRESULT_FIXEDPOINT_MULTIPLIER and -DRESULT_SHIFT
Gian Marco58c57942017-11-28 09:10:03 +00001891 *
1892 * @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
Manuel Bottini1f332d42019-11-29 17:25:25 +00001893 * @note The output datatype should be passed at compile time using -DOUTPUT_DATA_TYPE
Gian Marco58c57942017-11-28 09:10:03 +00001894 * @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
1895 * These values can be used to implement "rectified linear unit" activation functions
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001896 * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
1897 * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
Gian Marco58c57942017-11-28 09:10:03 +00001898 *
1899 * @param[in] src_ptr Pointer to the source tensor. Supported data type: S32
1900 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
1901 * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
1902 * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
1903 * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
1904 * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
1905 * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
1906 * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001907 * @param[in] biases_ptr (Optional) Pointer to the biases tensor. Supported data type: same as @p src_ptr
1908 * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
1909 * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
1910 * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
Sheri Zhang0cdbda52020-02-25 15:57:21 +00001911 * @param[out] dst_ptr                               Pointer to the destination tensor. Supported data type: QASYMM8/QASYMM8_SIGNED
Gian Marco58c57942017-11-28 09:10:03 +00001912 * @param[in]  dst_stride_x                          Stride of the destination tensor in X dimension (in bytes)
1913 * @param[in]  dst_step_x                            dst_stride_x * number of elements along X processed per workitem(in bytes)
1914 * @param[in]  dst_stride_y                          Stride of the destination tensor in Y dimension (in bytes)
1915 * @param[in]  dst_step_y                            dst_stride_y * number of elements along Y processed per workitem(in bytes)
1916 * @param[in]  dst_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
1917 * @param[in]  dst_step_z                            dst_stride_z * number of elements along Z processed per workitem(in bytes)
1918 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
1919 */
1920__kernel void gemmlowp_output_stage_quantize_down_fixedpoint(TENSOR3D_DECLARATION(src),
1921#if defined(ADD_BIAS)
1922 VECTOR_DECLARATION(biases),
1923#endif // defined(ADD_BIAS)
1924 TENSOR3D_DECLARATION(dst))
1925{
1926 // Compute source and destination addresses
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001927 int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001928 int y = get_global_id(1);
1929 int z = get_global_id(2);
Georgios Pinitas932491f2018-09-21 16:33:15 +01001930
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001931 __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(int) + y * src_stride_y + z * src_stride_z;
Gian Marco58c57942017-11-28 09:10:03 +00001932
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001933 __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x + y * dst_stride_y + z * dst_stride_z;
1934
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001935 VEC_DATA_TYPE(int, VEC_SIZE)
1936 input_values = VLOAD(VEC_SIZE)(0, (__global int *)src_addr);
Gian Marco58c57942017-11-28 09:10:03 +00001937
1938#if defined(ADD_BIAS)
1939 // Add bias
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001940 __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(int);
1941
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001942 VEC_DATA_TYPE(int, VEC_SIZE)
1943 biases_values = VLOAD(VEC_SIZE)(0, (__global int *)bias_addr);
1944 input_values += biases_values;
Gian Marco58c57942017-11-28 09:10:03 +00001945#endif // defined(ADD_BIAS)
1946
1947    // Multiply by result_fixedpoint_multiplier and shift
Michele Di Giorgio14cbfb22019-10-23 10:53:10 +01001948#if RESULT_SHIFT < 0
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001949 input_values = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(input_values, RESULT_FIXEDPOINT_MULTIPLIER, RESULT_SHIFT, VEC_SIZE);
Michele Di Giorgio14cbfb22019-10-23 10:53:10 +01001950#else // RESULT_SHIFT >= 0
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001951 input_values = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(input_values, RESULT_FIXEDPOINT_MULTIPLIER, RESULT_SHIFT, VEC_SIZE);
Michele Di Giorgio14cbfb22019-10-23 10:53:10 +01001952#endif // RESULT_SHIFT < 0
Gian Marco58c57942017-11-28 09:10:03 +00001953
1954    // Add the output offset (RESULT_OFFSET_AFTER_SHIFT)
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001955 input_values += (VEC_DATA_TYPE(int, VEC_SIZE))RESULT_OFFSET_AFTER_SHIFT;
Gian Marco58c57942017-11-28 09:10:03 +00001956
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001957 VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE)
1958 res0 = CONVERT_SAT(input_values, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE));
Gian Marco58c57942017-11-28 09:10:03 +00001959
1960#if defined(MIN_BOUND)
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001961 res0 = max(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MIN_BOUND);
Gian Marco58c57942017-11-28 09:10:03 +00001962#endif // defined(MIN_BOUND)
1963#if defined(MAX_BOUND)
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001964 res0 = min(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MAX_BOUND);
Gian Marco58c57942017-11-28 09:10:03 +00001965#endif // defined(MAX_BOUND)
1966
1967 // Store the result
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001968 STORE_VECTOR_SELECT(res, OUTPUT_DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
Gian Marco58c57942017-11-28 09:10:03 +00001969}
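
// Worked example (illustrative, with hypothetical values): assuming RESULT_FIXEDPOINT_MULTIPLIER = 1 << 30
// (a real scale of 0.5 expressed as a Q0.31 fixed-point multiplier), RESULT_SHIFT = 2 and
// RESULT_OFFSET_AFTER_SHIFT = 10, an accumulator value of 100 would be processed as:
//   fixed-point multiply by 0.5       : 100 -> 50
//   rounding divide by 2^RESULT_SHIFT : 50 / 4 = 12.5 -> 13
//   add the output offset             : 13 + 10 = 23
// The result is then saturated to OUTPUT_DATA_TYPE and, if given, clamped to [MIN_BOUND, MAX_BOUND].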
Chunosov5124be52017-11-22 20:42:13 +07001970#endif // defined(RESULT_OFFSET_AFTER_SHIFT) && defined(RESULT_FIXEDPOINT_MULTIPLIER) && defined(RESULT_SHIFT)
Georgios Pinitas51e53a32018-10-22 13:49:08 +01001971
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01001972#if defined(RESULT_FIXEDPOINT_MULTIPLIER) && defined(RESULT_SHIFT)
1973
Michalis Spyrou51146c52019-07-12 14:42:29 +01001974/** This OpenCL kernel is used to quantize down the int32 accumulator values of GEMMLowp to QSYMM16
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01001975 *
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001976 * This kernel takes a final int32 accumulator value (the output of matrix multiplication), and processes it to obtain the final QSYMM16 value.
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01001977 * The following computations will be performed by the kernel:
1978 *
1979 * -# Add bias to final result if bias tensor is not a nullptr
1980 * -# Compute fixed point multiplication between each entry of the input and result_fixedpoint_multiplier
1981 * -# Round to nearest division by a power-of-two using result_shift
1982 * -# Clamp the value between the specified min and max bounds
1984 * -# Clamp the resulting int32 values to the [-32768..32767] range and cast to QSYMM16.
1985 *
1986 * @attention The scalar scale factor and number of bits to shift right of output tensor must be passed at compile time using -DRESULT_FIXEDPOINT_MULTIPLIER and -DRESULT_SHIFT
1987 *
1988 * @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
1989 * @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
1990 * These values can be used to implement "rectified linear unit" activation functions
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001991 * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
1992 * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01001993 *
1994 * @param[in] src_ptr Pointer to the source tensor. Supported data type: S32
1995 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
1996 * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
1997 * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
1998 * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
1999 * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
2000 * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
2001 * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
2002 * @param[in] biases_ptr (Optional) Pointer to the biases tensor. Supported data type: same as @p src_ptr
2003 * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
2004 * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
2005 * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
Sheri Zhangb18252d2020-04-07 11:04:57 +01002006 * @param[out] dst_ptr                               Pointer to the destination tensor. Supported data type: QSYMM16
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002007 * @param[in]  dst_stride_x                          Stride of the destination tensor in X dimension (in bytes)
2008 * @param[in]  dst_step_x                            dst_stride_x * number of elements along X processed per workitem(in bytes)
2009 * @param[in]  dst_stride_y                          Stride of the destination tensor in Y dimension (in bytes)
2010 * @param[in]  dst_step_y                            dst_stride_y * number of elements along Y processed per workitem(in bytes)
2011 * @param[in]  dst_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
2012 * @param[in]  dst_step_z                            dst_stride_z * number of elements along Z processed per workitem(in bytes)
2013 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
2014 */
2015__kernel void gemmlowp_output_stage_quantize_down_fixedpoint_qsymm16(TENSOR3D_DECLARATION(src),
2016#if defined(ADD_BIAS)
2017 VECTOR_DECLARATION(biases),
2018#endif // defined(ADD_BIAS)
2019 TENSOR3D_DECLARATION(dst))
2020{
2021 // Compute source and destination addresses
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002022 int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002023 int y = get_global_id(1);
2024 int z = get_global_id(2);
2025
Michalis Spyrou51146c52019-07-12 14:42:29 +01002026 __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(int) + y * src_stride_y + z * src_stride_z;
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002027
Michele Di Giorgioba14c922020-10-12 13:27:57 +01002028 __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * sizeof(short) + y * dst_stride_y + z * dst_stride_z;
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002029
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002030 VEC_DATA_TYPE(int, VEC_SIZE)
2031 input_values = VLOAD(VEC_SIZE)(0, (__global int *)src_addr);
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002032
2033#if defined(ADD_BIAS)
2034 // Add bias
Michalis Spyrou51146c52019-07-12 14:42:29 +01002035 __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(int);
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002036
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002037 VEC_DATA_TYPE(int, VEC_SIZE)
2038 biases_values = VLOAD(VEC_SIZE)(0, (__global int *)bias_addr);
2039 input_values += biases_values;
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002040#endif // defined(ADD_BIAS)
2041
2042    // Multiply by result_fixedpoint_multiplier and shift
Manuel Bottini07263982019-10-17 18:37:26 +01002043#if RESULT_SHIFT < 0
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002044 input_values = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(input_values, RESULT_FIXEDPOINT_MULTIPLIER, RESULT_SHIFT, VEC_SIZE);
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00002045#else // RESULT_SHIFT >= 0
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002046 input_values = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(input_values, RESULT_FIXEDPOINT_MULTIPLIER, RESULT_SHIFT, VEC_SIZE);
Manuel Bottini07263982019-10-17 18:37:26 +01002047#endif // RESULT_SHIFT < 0
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002048
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002049 VEC_DATA_TYPE(short, VEC_SIZE)
2050 res0 = CONVERT_SAT(input_values, VEC_DATA_TYPE(short, VEC_SIZE));
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002051
2052#if defined(MIN_BOUND)
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002053 res0 = max(res0, (VEC_DATA_TYPE(short, VEC_SIZE))MIN_BOUND);
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002054#endif // defined(MIN_BOUND)
2055#if defined(MAX_BOUND)
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002056 res0 = min(res0, (VEC_DATA_TYPE(short, VEC_SIZE))MAX_BOUND);
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002057#endif // defined(MAX_BOUND)
2058
2059 // Store the result
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002060 STORE_VECTOR_SELECT(res, short, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002061}
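
// Illustrative note: MIN_BOUND/MAX_BOUND can fuse a bounded activation into this output stage.
// For example (hypothetical values), for a QSYMM16 output with scale 0.0005 a fused ReLU6 would be
// obtained by passing -DMIN_BOUND=0 (the symmetric zero point) and -DMAX_BOUND=12000 (= 6 / 0.0005).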
2062#endif // defined(RESULT_FIXEDPOINT_MULTIPLIER) && defined(RESULT_SHIFT)
2063
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002064#if defined(REAL_MULTIPLIER) && defined(OUTPUT_OFFSET)
Sheri Zhang1b14c752020-03-09 14:29:52 +00002065/** This OpenCL kernel is used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002066 *
Sheri Zhang1b14c752020-03-09 14:29:52 +00002067 * This kernel takes a final int32 accumulator value (the output of matrix multiplication), and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value.
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002068 * The following computations will be performed by the kernel:
2069 *
2070 * -# Add bias to final result if bias tensor is not a nullptr
2071 * -# Convert the int32 accumulator to float and multiply it by the real multiplier (REAL_MULTIPLIER)
2072 * -# Add the output offset (OUTPUT_OFFSET) and round to the nearest integer
2074 * -# Clamp the value between the specified min and max bounds
Sheri Zhang1b14c752020-03-09 14:29:52 +00002075 * -# Clamp the resulting int32 values:
2076 * - to the [0..255] range and cast to QASYMM8.
2077 * - to the [-128..127] range and cast to QASYMM8_SIGNED.
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002078 *
2079 * @attention The offset and scalar scale factor must be passed at compile time using -DOUTPUT_OFFSET and -DREAL_MULTIPLIER
2080 *
2081 * @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
Sheri Zhang1b14c752020-03-09 14:29:52 +00002082 * @note The output datatype should be passed at compile time using -DOUTPUT_DATA_TYPE
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002083 * @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
2084 * These values can be used to implement "rectified linear unit" activation functions
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002085 * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
2086 * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002087 *
2088 * @param[in] src_ptr Pointer to the source tensor. Supported data type: S32
2089 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
2090 * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
2091 * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
2092 * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
2093 * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
2094 * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
2095 * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
2096 * @param[in] biases_ptr Pointer to the biases tensor. Supported data type: same as @p src_ptr
2097 * @param[in] biases_stride_x Stride of the biases tensor in X dimension (in bytes)
2098 * @param[in] biases_step_x biases_stride_x * number of elements along X processed per workitem(in bytes)
2099 * @param[in] biases_offset_first_element_in_bytes The offset of the first element in the biases tensor
2100 * @param[out] dst_ptr                               Pointer to the destination tensor. Supported data type: QASYMM8/QASYMM8_SIGNED
2101 * @param[in]  dst_stride_x                          Stride of the destination tensor in X dimension (in bytes)
2102 * @param[in]  dst_step_x                            dst_stride_x * number of elements along X processed per workitem(in bytes)
2103 * @param[in]  dst_stride_y                          Stride of the destination tensor in Y dimension (in bytes)
2104 * @param[in]  dst_step_y                            dst_stride_y * number of elements along Y processed per workitem(in bytes)
2105 * @param[in]  dst_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
2106 * @param[in]  dst_step_z                            dst_stride_z * number of elements along Z processed per workitem(in bytes)
2107 * @param[in]  dst_stride_w                          Stride of the destination tensor in W dimension (in bytes)
2108 * @param[in]  dst_step_w                            dst_stride_w * number of elements along W processed per workitem(in bytes)
2109 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
2110 */
2111__kernel void gemmlowp_output_stage_quantize_down_float(TENSOR3D_DECLARATION(src),
2112#if defined(ADD_BIAS)
2113 VECTOR_DECLARATION(biases),
2114#endif // defined(ADD_BIAS)
2115#if defined(DST_HEIGHT)
2116 TENSOR4D_DECLARATION(dst))
2117#else // defined(DST_HEIGHT)
2118 TENSOR3D_DECLARATION(dst))
2119#endif // defined(DST_HEIGHT)
2120{
2121 // Compute source and destination addresses
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002122 int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
Gian Marco Iodice0c54a622018-10-30 12:20:03 +00002123 int y = get_global_id(1);
2124 int z = get_global_id(2);
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002125
Gian Marco Iodice0c54a622018-10-30 12:20:03 +00002126 __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(int) + y * src_stride_y + z * src_stride_z;
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002127
Gian Marco Iodice0c54a622018-10-30 12:20:03 +00002128 __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x + y * dst_stride_y + z * dst_stride_z;
2129
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002130 VEC_DATA_TYPE(int, VEC_SIZE)
2131 input_values = VLOAD(VEC_SIZE)(0, (__global int *)src_addr);
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002132
2133#if defined(ADD_BIAS)
2134 // Add bias
Gian Marco Iodice0c54a622018-10-30 12:20:03 +00002135 __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(int);
2136
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002137 VEC_DATA_TYPE(int, VEC_SIZE)
2138 biases_values = VLOAD(VEC_SIZE)(0, (__global int *)bias_addr);
Giorgio Arenafd83bc82021-05-12 12:44:47 +01002139 input_values += (VEC_DATA_TYPE(int, VEC_SIZE))biases_values;
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002140#endif // defined(ADD_BIAS)
2141
2142 // Convert to float
Giorgio Arenafd83bc82021-05-12 12:44:47 +01002143 VEC_DATA_TYPE(float, VEC_SIZE)
2144 input_values_f = CONVERT(input_values, VEC_DATA_TYPE(float, VEC_SIZE));
2145 input_values_f = round(input_values_f * (float)REAL_MULTIPLIER + (float)OUTPUT_OFFSET);
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002146
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002147 VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE)
2148 res0 = CONVERT_SAT(input_values_f, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE));
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002149
2150#if defined(MIN_BOUND)
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002151 res0 = max(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MIN_BOUND);
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002152#endif // defined(MIN_BOUND)
2153#if defined(MAX_BOUND)
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002154 res0 = min(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MAX_BOUND);
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002155#endif // defined(MAX_BOUND)
2156
2157 // Store the result
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002158 STORE_VECTOR_SELECT(res, OUTPUT_DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002159}
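
// Illustrative note: REAL_MULTIPLIER is the output-stage scale expressed directly as a float;
// in a quantized GEMM/convolution it is commonly derived as (input_scale * weights_scale) / output_scale.
// Worked example with hypothetical values: for REAL_MULTIPLIER = 0.0023, OUTPUT_OFFSET = 128 and an
// accumulator value of 1234, the kernel computes round(1234 * 0.0023 + 128) = round(130.8382) = 131,
// which is then saturated to OUTPUT_DATA_TYPE and optionally clamped to [MIN_BOUND, MAX_BOUND].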
Gian Marco Iodice27423f02020-08-12 14:12:28 +01002160#endif // defined(REAL_MULTIPLIER) && defined(OUTPUT_OFFSET)