/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "gemm_helpers.h"
#include "helpers_asymm.h"
#include "repeat.h"

#if defined(DATA_TYPE) && defined(ACC_DATA_TYPE)

#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#define ARM_DOT(x, y, val) val = arm_dot_acc((x), (y), (val));
#else // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#define ARM_DOT(x, y, val) val += arm_dot((x), (y));
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)

#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)

/** Specialized macros to perform the dot product instruction between two vectors of size N [1,16]. These macros use the dot8 instruction */
#define ARM_DOT1(a, b, c) \
    ({ \
        ARM_DOT((VEC_DATA_TYPE(DATA_TYPE, 4))(a, (VEC_DATA_TYPE(DATA_TYPE, 3))0), (VEC_DATA_TYPE(DATA_TYPE, 4))(b, (VEC_DATA_TYPE(DATA_TYPE, 3))0), c); \
    })
#define ARM_DOT2(a, b, c) \
    ({ \
        ARM_DOT((VEC_DATA_TYPE(DATA_TYPE, 4))(a, (VEC_DATA_TYPE(DATA_TYPE, 2))0), (VEC_DATA_TYPE(DATA_TYPE, 4))(b, (VEC_DATA_TYPE(DATA_TYPE, 2))0), c); \
    })
#define ARM_DOT3(a, b, c) \
    ({ \
        ARM_DOT((VEC_DATA_TYPE(DATA_TYPE, 4))(a, (DATA_TYPE)0), (VEC_DATA_TYPE(DATA_TYPE, 4))(b, (DATA_TYPE)0), c); \
    })
#define ARM_DOT4(a, b, c) \
    ({ \
        ARM_DOT(a, b, c); \
    })
#define ARM_DOT8(a, b, c) \
    ({ \
        ARM_DOT4((a.lo), (b.lo), c); \
        ARM_DOT4((a.hi), (b.hi), c); \
    })
#define ARM_DOT16(a, b, c) \
    ({ \
        ARM_DOT8((a.lo), (b.lo), c); \
        ARM_DOT8((a.hi), (b.hi), c); \
    })

#else // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)

/** Specialized macros to perform the dot product instruction between two vectors of size K0 [1,16] without using the dot8 instruction. */
#define ARM_DOT1(a, b, c) \
    ({ \
        c += (ACC_DATA_TYPE)a * b; \
    })
#define ARM_DOT2(a, b, c) \
    ({ \
        c += (ACC_DATA_TYPE)a.s0 * b.s0; \
        c += (ACC_DATA_TYPE)a.s1 * b.s1; \
    })
#define ARM_DOT3(a, b, c) \
    ({ \
        ARM_DOT2(a, b, c); \
        c += (ACC_DATA_TYPE)a.s2 * b.s2; \
    })
#define ARM_DOT4(a, b, c) \
    ({ \
        ARM_DOT3(a, b, c); \
        c += (ACC_DATA_TYPE)a.s3 * b.s3; \
    })
#define ARM_DOT8(a, b, c) \
    ({ \
        ARM_DOT4((a.lo), (b.lo), c); \
        ARM_DOT4((a.hi), (b.hi), c); \
    })
#define ARM_DOT16(a, b, c) \
    ({ \
        ARM_DOT8((a.lo), (b.lo), c); \
        ARM_DOT8((a.hi), (b.hi), c); \
    })
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)

/** Specialized macros to perform a broadcast dot product operation between one vector "a" and N0 vectors "b" of size K0 [1,16] */
#define ARM_DOT_K0X1(k0, a, b, c) \
    ({ \
        ARM_DOT_K0(k0, (a), (b##0), (c)); \
    })
#define ARM_DOT_K0X2(k0, a, b, c) \
    ({ \
        ARM_DOT_K0(k0, (a), (b##0), (c.s0)); \
        ARM_DOT_K0(k0, (a), (b##1), (c.s1)); \
    })
#define ARM_DOT_K0X3(k0, a, b, c) \
    ({ \
        ARM_DOT_K0X2(k0, a, b, c); \
        ARM_DOT_K0(k0, (a), (b##2), (c.s2)); \
    })
#define ARM_DOT_K0X4(k0, a, b, c) \
    ({ \
        ARM_DOT_K0X3(k0, a, b, c); \
        ARM_DOT_K0(k0, (a), (b##3), (c.s3)); \
    })
#define ARM_DOT_K0X8(k0, a, b, c) \
    ({ \
        ARM_DOT_K0X4(k0, a, b, c); \
        ARM_DOT_K0(k0, (a), (b##4), (c.s4)); \
        ARM_DOT_K0(k0, (a), (b##5), (c.s5)); \
        ARM_DOT_K0(k0, (a), (b##6), (c.s6)); \
        ARM_DOT_K0(k0, (a), (b##7), (c.s7)); \
    })
#define ARM_DOT_K0X16(k0, a, b, c) \
    ({ \
        ARM_DOT_K0X8(k0, a, b, c); \
        ARM_DOT_K0(k0, (a), (b##8), (c.s8)); \
        ARM_DOT_K0(k0, (a), (b##9), (c.s9)); \
        ARM_DOT_K0(k0, (a), (b##A), (c.sA)); \
        ARM_DOT_K0(k0, (a), (b##B), (c.sB)); \
        ARM_DOT_K0(k0, (a), (b##C), (c.sC)); \
        ARM_DOT_K0(k0, (a), (b##D), (c.sD)); \
        ARM_DOT_K0(k0, (a), (b##E), (c.sE)); \
        ARM_DOT_K0(k0, (a), (b##F), (c.sF)); \
    })
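
// Illustrative sketch (assumption, for readability only): for N0 = 2 and K0 = 4, the broadcast
// dot product macro accumulates one dot product per "b" block into each lane of "c":
//
//   VEC_DATA_TYPE(DATA_TYPE, 4)     a;      // one row fragment of the LHS
//   VEC_DATA_TYPE(DATA_TYPE, 4)     b0, b1; // two transposed column fragments of the RHS
//   VEC_DATA_TYPE(ACC_DATA_TYPE, 2) c = 0;  // two partial results
//   ARM_DOT_K0X2(4, a, b, c);               // c.s0 += dot(a, b0); c.s1 += dot(a, b1);
//
// ARM_DOT_K0XN0 (defined below) simply dispatches to one of these macros based on N0.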

/** Specialized macros to perform a partial matrix multiplication with dimensions M0,N0,K0 */
#define ARM_MM_K0XN0X1(n0, k0, a, b, c) \
    ({ \
        ARM_DOT_K0XN0(n0, k0, (a##0), b, (c##0)); \
    })
#define ARM_MM_K0XN0X2(n0, k0, a, b, c) \
    ({ \
        ARM_MM_K0XN0X1(n0, k0, a, b, c); \
        ARM_DOT_K0XN0(n0, k0, (a##1), b, (c##1)); \
    })
#define ARM_MM_K0XN0X3(n0, k0, a, b, c) \
    ({ \
        ARM_MM_K0XN0X2(n0, k0, a, b, c); \
        ARM_DOT_K0XN0(n0, k0, (a##2), b, (c##2)); \
    })
#define ARM_MM_K0XN0X4(n0, k0, a, b, c) \
    ({ \
        ARM_MM_K0XN0X3(n0, k0, a, b, c); \
        ARM_DOT_K0XN0(n0, k0, (a##3), b, (c##3)); \
    })
#define ARM_MM_K0XN0X5(n0, k0, a, b, c) \
    ({ \
        ARM_MM_K0XN0X4(n0, k0, a, b, c); \
        ARM_DOT_K0XN0(n0, k0, (a##4), b, (c##4)); \
    })
#define ARM_MM_K0XN0X6(n0, k0, a, b, c) \
    ({ \
        ARM_MM_K0XN0X5(n0, k0, a, b, c); \
        ARM_DOT_K0XN0(n0, k0, (a##5), b, (c##5)); \
    })
#define ARM_MM_K0XN0X7(n0, k0, a, b, c) \
    ({ \
        ARM_MM_K0XN0X6(n0, k0, a, b, c); \
        ARM_DOT_K0XN0(n0, k0, (a##6), b, (c##6)); \
    })
#define ARM_MM_K0XN0X8(n0, k0, a, b, c) \
    ({ \
        ARM_MM_K0XN0X7(n0, k0, a, b, c); \
        ARM_DOT_K0XN0(n0, k0, (a##7), b, (c##7)); \
    })

#define ARM_DOT_K0(k0, a, b, c) \
    ({ \
        CONCAT(ARM_DOT, k0) \
        ((a), (b), (c)); \
    })

#define ARM_DOT_K0XN0(n0, k0, a, b, c) \
    ({ \
        CONCAT(ARM_DOT_K0X, n0) \
        (k0, (a), b, (c)); \
    })

#define ARM_MM_K0XN0XM0(m0, n0, k0, a, b, c) \
    ({ \
        CONCAT(ARM_MM_K0XN0X, m0) \
        (n0, k0, a, b, c); \
    })
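
// Illustrative sketch (assumption, for readability only): ARM_MM_K0XN0XM0(M0, N0, K0, a, b, c)
// expands to one broadcast dot product per LHS row fragment, i.e. for every m in [0, M0):
//
//   ARM_DOT_K0XN0(N0, K0, a##m, b, c##m); // c##m.s[n] += dot(a##m, b##n) for n in [0, N0)
//
// so each accumulator vector c##m holds one row of the M0xN0 output block.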

/** Specialized macros to perform a broadcast multiply-accumulate operation between one vector "a" of size K0 [1,16] and K0 vectors "b" of size N0 */
#define ARM_MUL_N0X1(VECTOR_ACC_TYPE, a, b, c) \
    ({ \
        c += CONVERT(b##0, VECTOR_ACC_TYPE) * a; \
    })
#define ARM_MUL_N0X2(VECTOR_ACC_TYPE, a, b, c) \
    ({ \
        c += CONVERT(b##0, VECTOR_ACC_TYPE) * a.s##0; \
        c += CONVERT(b##1, VECTOR_ACC_TYPE) * a.s##1; \
    })
#define ARM_MUL_N0X3(VECTOR_ACC_TYPE, a, b, c) \
    ({ \
        ARM_MUL_N0X2(VECTOR_ACC_TYPE, a, b, c); \
        c += CONVERT(b##2, VECTOR_ACC_TYPE) * a.s##2; \
    })
#define ARM_MUL_N0X4(VECTOR_ACC_TYPE, a, b, c) \
    ({ \
        ARM_MUL_N0X3(VECTOR_ACC_TYPE, a, b, c); \
        c += CONVERT(b##3, VECTOR_ACC_TYPE) * a.s##3; \
    })
#define ARM_MUL_N0X8(VECTOR_ACC_TYPE, a, b, c) \
    ({ \
        ARM_MUL_N0X4(VECTOR_ACC_TYPE, a, b, c); \
        c += CONVERT(b##4, VECTOR_ACC_TYPE) * a.s##4; \
        c += CONVERT(b##5, VECTOR_ACC_TYPE) * a.s##5; \
        c += CONVERT(b##6, VECTOR_ACC_TYPE) * a.s##6; \
        c += CONVERT(b##7, VECTOR_ACC_TYPE) * a.s##7; \
    })
#define ARM_MUL_N0X16(VECTOR_ACC_TYPE, a, b, c) \
    ({ \
        ARM_MUL_N0X8(VECTOR_ACC_TYPE, a, b, c); \
        c += CONVERT(b##8, VECTOR_ACC_TYPE) * a.s##8; \
        c += CONVERT(b##9, VECTOR_ACC_TYPE) * a.s##9; \
        c += CONVERT(b##A, VECTOR_ACC_TYPE) * a.s##A; \
        c += CONVERT(b##B, VECTOR_ACC_TYPE) * a.s##B; \
        c += CONVERT(b##C, VECTOR_ACC_TYPE) * a.s##C; \
        c += CONVERT(b##D, VECTOR_ACC_TYPE) * a.s##D; \
        c += CONVERT(b##E, VECTOR_ACC_TYPE) * a.s##E; \
        c += CONVERT(b##F, VECTOR_ACC_TYPE) * a.s##F; \
    })
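
// Illustrative sketch (assumption, for readability only): for K0 = 2 and an accumulator type of
// uint4 (i.e. N0 = 4), ARM_MUL_N0X2(uint4, a, b, c) expands to
//
//   c += CONVERT(b0, uint4) * a.s0;
//   c += CONVERT(b1, uint4) * a.s1;
//
// i.e. each element of "a" scales a whole RHS row fragment. This is the non-transposed (native)
// formulation of the same M0xN0xK0 block product computed by the macros above.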
/** Specialized macros to perform a partial matrix multiplication with dimensions M0,N0,K0 */
#define ARM_MM_NATIVE_N0XK0X1(VECTOR_ACC_TYPE, k0, a, b, c) \
    ({ \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##0), b, (c##0)); \
    })
#define ARM_MM_NATIVE_N0XK0X2(VECTOR_ACC_TYPE, k0, a, b, c) \
    ({ \
        ARM_MM_NATIVE_N0XK0X1(VECTOR_ACC_TYPE, k0, a, b, c); \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##1), b, (c##1)); \
    })
#define ARM_MM_NATIVE_N0XK0X3(VECTOR_ACC_TYPE, k0, a, b, c) \
    ({ \
        ARM_MM_NATIVE_N0XK0X2(VECTOR_ACC_TYPE, k0, a, b, c); \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##2), b, (c##2)); \
    })
#define ARM_MM_NATIVE_N0XK0X4(VECTOR_ACC_TYPE, k0, a, b, c) \
    ({ \
        ARM_MM_NATIVE_N0XK0X3(VECTOR_ACC_TYPE, k0, a, b, c); \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##3), b, (c##3)); \
    })
#define ARM_MM_NATIVE_N0XK0X5(VECTOR_ACC_TYPE, k0, a, b, c) \
    ({ \
        ARM_MM_NATIVE_N0XK0X4(VECTOR_ACC_TYPE, k0, a, b, c); \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##4), b, (c##4)); \
    })
#define ARM_MM_NATIVE_N0XK0X6(VECTOR_ACC_TYPE, k0, a, b, c) \
    ({ \
        ARM_MM_NATIVE_N0XK0X5(VECTOR_ACC_TYPE, k0, a, b, c); \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##5), b, (c##5)); \
    })
#define ARM_MM_NATIVE_N0XK0X7(VECTOR_ACC_TYPE, k0, a, b, c) \
    ({ \
        ARM_MM_NATIVE_N0XK0X6(VECTOR_ACC_TYPE, k0, a, b, c); \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##6), b, (c##6)); \
    })
#define ARM_MM_NATIVE_N0XK0X8(VECTOR_ACC_TYPE, k0, a, b, c) \
    ({ \
        ARM_MM_NATIVE_N0XK0X7(VECTOR_ACC_TYPE, k0, a, b, c); \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##7), b, (c##7)); \
    })
#define ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, a, b, c) \
    ({ \
        CONCAT(ARM_MUL_N0X, k0) \
        (VECTOR_ACC_TYPE, (a), b, (c)); \
    })
#define ARM_MM_NATIVE_N0XK0XM0(VECTOR_ACC_TYPE, m0, k0, a, b, c) \
    ({ \
        CONCAT(ARM_MM_NATIVE_N0XK0X, m0) \
        (VECTOR_ACC_TYPE, k0, a, b, c); \
    })
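
// Illustrative sketch (assumption, for readability only): the native dispatcher mirrors
// ARM_MM_K0XN0XM0 but consumes the RHS block row by row instead of requiring it transposed:
//
//   ARM_MM_NATIVE_N0XK0XM0(VEC_DATA_TYPE(ACC_DATA_TYPE, N0), M0, K0, a, b, c);
//   // for every m in [0, M0): c##m += sum over i in [0, K0) of CONVERT(b##i) * a##m.s##i
//
// This is the form used by gemmlowp_mm_native on Midgard GPUs further down in this file.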

#if defined(M0) && defined(N0) && defined(K0) && defined(V0) && defined(H0) && defined(M) && defined(N)
/** This OpenCL kernel computes the matrix multiplication between 2 matrices with QASYMM8/QASYMM8_SIGNED data type.
 *  The LHS matrix must be reshaped with @ref CLGEMMReshapeLHSMatrixKernel and the M0xK0 must be NOT transposed
 *  The RHS matrix must be reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the K0xN0 must be transposed
 *
 * @note The input data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=uchar)
 * @note The accumulator data type must be passed at compile time using -DACC_DATA_TYPE (i.e. -DACC_DATA_TYPE=uint)
 * @note If the first two dimensions of NDRange have been dispatched with "dummy_work_items" support, the option -DDUMMY_WORK_ITEMS must be passed at compile time.
 * @note The GEMM's dimensions M and N must be passed at compile time using -DM and -DN (i.e. -DM=52 and -DN=90).
 * @note The block's dimensions used for reshaping the LHS matrix and the RHS matrix (M0, N0 and K0) must be passed at compile time using -DM0, -DN0 and -DK0 (i.e. -DM0=4, -DN0=8, -DK0=4).
 * @note The number of M0xK0 vertical blocks stored on the same output row of the reshaped LHS matrix must be passed at compile time using -DV0 (i.e. -DV0=2)
 * @note The number of K0xN0 horizontal blocks stored on the same output row of the reshaped RHS matrix must be passed at compile time using -DH0 (i.e. -DH0=2)
 * @note If the M0xK0 blocks in the reshaped LHS matrix have been interleaved, the option -DLHS_INTERLEAVE must be passed at compile time.
 * @note If the K0xN0 blocks in the reshaped RHS matrix have been interleaved, the option -DRHS_INTERLEAVE must be passed at compile time.
 * @note Only the following configurations of M0, N0 and K0 are currently supported:
 *  - M0 = 2, 3, 4, 5, 6, 7, 8
 *  - N0 = 2, 3, 4, 8, 16
 *  - K0 = 2, 3, 4, 8, 16
 *  - V0 >= 1
 *  - H0 >= 1
 *
 * @note In case the output has to be reinterpreted as a 3D tensor (i.e. output of convolution layer), the following information must be passed at compile time:
 *       -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
 *       -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
 *       -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
 *          (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns LHS matrix NOT reshaped
 *
 * @param[in]  lhs_ptr Pointer to the LHS reshaped matrix. Supported data type: QASYMM8/QASYMM8_SIGNED
 * @param[in]  lhs_stride_x Stride of the LHS reshaped matrix in X dimension (in bytes)
 * @param[in]  lhs_step_x lhs_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  lhs_stride_y Stride of the LHS reshaped matrix in Y dimension (in bytes)
 * @param[in]  lhs_step_y lhs_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  lhs_offset_first_element_in_bytes The offset of the first element in the LHS reshaped matrix
 * @param[in]  rhs_ptr Pointer to the RHS reshaped matrix. Supported data type: same as @p lhs_ptr
 * @param[in]  rhs_stride_x Stride of the RHS reshaped matrix in X dimension (in bytes)
 * @param[in]  rhs_step_x rhs_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  rhs_stride_y Stride of the RHS reshaped matrix in Y dimension (in bytes)
 * @param[in]  rhs_step_y rhs_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  rhs_offset_first_element_in_bytes The offset of the first element in the RHS reshaped matrix
 * @param[out] dst_ptr Pointer to the destination matrix. Supported data type: S32
 * @param[in]  dst_stride_x Stride of the destination matrix in X dimension (in bytes)
 * @param[in]  dst_step_x dst_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
 * @param[in]  dst_step_y dst_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
 * @param[in]  k Number of columns in LHS matrix and rows in RHS matrix not reshaped
 * @param[in]  lhs_stride_z Stride of the LHS reshaped matrix in Z dimension (in bytes)
 * @param[in]  rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes)
 * @param[in]  dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
 * @param[in]  dst_cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
 */
__kernel void gemmlowp_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs),
                                                IMAGE_DECLARATION(rhs),
                                                IMAGE_DECLARATION(dst),
                                                uint k,
                                                uint lhs_stride_z,
                                                uint rhs_stride_z,
                                                uint dst_stride_z
#if defined(REINTERPRET_OUTPUT_AS_3D)
                                                ,
                                                uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
                                               )
{
    // Block size
#define LHS_BLOCK_SIZE ((K0) * (M0))

#if defined(LHS_INTERLEAVE)
#define LHS_OFFSET_X (K0)
#define LHS_STEP_X ((K0) * (V0))
#define LHS_STEP_LOOP (1)
#else // defined(LHS_INTERLEAVE)
#define LHS_OFFSET_X (LHS_BLOCK_SIZE)
#define LHS_STEP_X (K0)
#define LHS_STEP_LOOP (V0)
#endif // defined(LHS_INTERLEAVE)

    // Block size
#define RHS_BLOCK_SIZE ((K0) * (N0))

    // RHS offset and step X
#if defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (K0)
#define RHS_STEP_X ((K0) * (H0))
#define RHS_STEP_LOOP (1)
#else // defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (RHS_BLOCK_SIZE)
#define RHS_STEP_X (K0)
#define RHS_STEP_LOOP (H0)
#endif // defined(RHS_INTERLEAVE)

    uint x = get_global_id(0);
    uint y = get_global_id(1);
    uint z = get_global_id(2);

#if defined(DUMMY_WORK_ITEMS)
    if((x * N0 >= N) || (y * M0 >= M))
    {
        return;
    }
#endif // defined(DUMMY_WORK_ITEMS)

    // Compute LHS matrix address
    __global DATA_TYPE *lhs_addr = lhs_ptr + lhs_offset_first_element_in_bytes + (y % V0) * (uint)LHS_OFFSET_X + (y / V0) * (uint)lhs_stride_y + (z * lhs_stride_z);

    // Compute RHS matrix address
    __global DATA_TYPE *rhs_addr = rhs_ptr + rhs_offset_first_element_in_bytes + (x % H0) * (uint)RHS_OFFSET_X + (x / (uint)H0) * rhs_stride_y;

#if defined(MATRIX_B_DEPTH)
    // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
    rhs_addr += (z % MATRIX_B_DEPTH) * rhs_stride_z;
#else  // defined(MATRIX_B_DEPTH)
    rhs_addr += z * rhs_stride_z;
#endif // defined(MATRIX_B_DEPTH)

    REPEAT_VAR_INIT_TO_CONST(8, uint, zlhs, 0); //uint zlhs0=0,zlhs1=0,zlhs2=0,... zlhs7=0;
    REPEAT_VAR_INIT_TO_CONST(16, uint, zrhs, 0);

    // Initialize the accumulators
    REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(ACC_DATA_TYPE, N0), c, 0); //VEC_DATA_TYPE(ACC_DATA_TYPE, N0) c0=0,c1=0,c2=0,... c(M0-1)=0;

    for(int i = 0; i < k; i += K0)
    {
        // Load values from LHS matrix
        LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_addr, 0, LHS_STEP_X, zlhs);

        // Load values from RHS matrix
        LOAD_BLOCK(N0, K0, DATA_TYPE, b, rhs_addr, 0, RHS_STEP_X, zrhs);

        // Partial matrix multiplication M0,N0,K0
        ARM_MM_K0XN0XM0(M0, N0, K0, a, b, c);

        // Update address
        lhs_addr += (M0 * LHS_STEP_X * LHS_STEP_LOOP);
        rhs_addr += (N0 * RHS_STEP_X * RHS_STEP_LOOP);
    }

    __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(int)) + (y * (uint)M0 * dst_stride_y);

    REPEAT_VAR_INIT_TO_CONST(8, uint, zout, 0); //uint zout0=0,zout1=0,zout2=0,... zout7=0;

#if defined(REINTERPRET_OUTPUT_AS_3D)
    // The plane (zout) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
    CALCULATE_Z_OFFSET(M0, uint, zout, y, HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y);

    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
    // multiply dst_stride_z by DEPTH_GEMM3D
    dst_addr += z * dst_stride_z * DEPTH_GEMM3D;

#else // defined(REINTERPRET_OUTPUT_AS_3D)

    // Add offset for batched GEMM
    dst_addr += z * dst_stride_z;

#endif // defined(REINTERPRET_OUTPUT_AS_3D)

    // Convert and store output block
    CONVERT_STORE_BLOCK(M0, N0, int, c, dst_addr, dst_stride_y, zout);

#undef LHS_BLOCK_SIZE
#undef LHS_OFFSET_X
#undef LHS_STEP_X
#undef RHS_BLOCK_SIZE
#undef RHS_OFFSET_X
#undef RHS_STEP_X
}
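
// Example (illustrative only, not generated by this file): a host-side build-options string that
// satisfies the compile-time requirements documented above for this kernel might look like
//
//   -DDATA_TYPE=uchar -DACC_DATA_TYPE=uint
//   -DM=64 -DN=128 -DM0=4 -DN0=4 -DK0=16 -DV0=2 -DH0=1
//   (optionally) -DLHS_INTERLEAVE -DRHS_INTERLEAVE -DDUMMY_WORK_ITEMS
//
// In the library, these values are normally chosen by the host-side kernel configuration based on
// the GEMM shape and the target GPU.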
#endif // defined(M0) && defined(N0) && defined(K0) && defined(V0) && defined(H0) && defined(M) && defined(N)

#if defined(M0) && defined(N0) && defined(K0) && defined(H0) && defined(K)

/** This OpenCL kernel computes the matrix multiplication between 2 matrices.
 *  The LHS matrix is NOT reshaped
 *  The RHS matrix is reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the block K0xN0 is transposed
 *
 * @note The input data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=uchar)
 * @note The accumulator data type must be passed at compile time using -DACC_DATA_TYPE (i.e. -DACC_DATA_TYPE=uint)
 * @note The number of columns of LHS matrix must be passed at compile time using -DK (i.e. -DK=64)
 * @note The block's dimensions used for reshaping the RHS matrix (N0 and K0) must be passed at compile time using -DN0 and -DK0 (i.e. -DN0=8, -DK0=4).
 * @note The number of M0 rows to process must be passed at compile time using -DM0 (i.e. -DM0=2)
 * @note The number of K0xN0 horizontal blocks stored on the same output row of the reshaped RHS matrix must be passed at compile time using -DH0 (i.e. -DH0=2)
 * @note If the K0xN0 blocks in the reshaped RHS matrix have been interleaved, the option -DRHS_INTERLEAVE must be passed at compile time.
 * @note Only the following configurations of M0, N0 and K0 are currently supported:
 *  - M0 = 1, 2, 3, 4, 5, 6, 7, 8
 *  - N0 = 2, 3, 4, 8, 16
 *  - K0 = 2, 3, 4, 8, 16
 *  - H0 >= 1
 *
 * @note In case the input or output have to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
 *       -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
 *       -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
 *       -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
 *       -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
 *          (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns LHS matrix
 *
 * @param[in]  lhs_ptr Pointer to the LHS matrix. Supported data type: QASYMM8/QASYMM8_SIGNED
 * @param[in]  lhs_stride_x Stride of the LHS matrix in X dimension (in bytes)
 * @param[in]  lhs_step_x lhs_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  lhs_stride_y Stride of the LHS matrix in Y dimension (in bytes)
 * @param[in]  lhs_step_y lhs_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  lhs_offset_first_element_in_bytes The offset of the first element in the LHS matrix
 * @param[in]  rhs_ptr Pointer to the RHS reshaped matrix. Supported data type: same as @p lhs_ptr
 * @param[in]  rhs_stride_x Stride of the RHS reshaped matrix in X dimension (in bytes)
 * @param[in]  rhs_step_x rhs_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  rhs_stride_y Stride of the RHS reshaped matrix in Y dimension (in bytes)
 * @param[in]  rhs_step_y rhs_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  rhs_offset_first_element_in_bytes The offset of the first element in the RHS reshaped matrix
 * @param[out] dst_ptr Pointer to the destination matrix. Supported data type: S32
 * @param[in]  dst_stride_x Stride of the destination matrix in X dimension (in bytes)
 * @param[in]  dst_step_x dst_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
 * @param[in]  dst_step_y dst_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
 * @param[in]  lhs_stride_z Stride of the LHS matrix in Z dimension (in bytes)
 * @param[in]  rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes)
 * @param[in]  dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
 * @param[in]  lhs_cross_plane_pad (Optional) Bottom paddings for LHS matrix in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
 * @param[in]  dst_cross_plane_pad (Optional) Bottom paddings for the output matrix in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
 */
__kernel void gemmlowp_mm_reshaped_only_rhs_t(IMAGE_DECLARATION(lhs),
                                              IMAGE_DECLARATION(rhs),
                                              IMAGE_DECLARATION(dst),
                                              uint lhs_stride_z,
                                              uint rhs_stride_z,
                                              uint dst_stride_z
#if defined(REINTERPRET_INPUT_AS_3D)
                                              ,
                                              uint lhs_cross_plane_pad
#endif // REINTERPRET_INPUT_AS_3D
#if defined(REINTERPRET_OUTPUT_AS_3D)
                                              ,
                                              uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
                                             )
{
    // Block size
#define RHS_BLOCK_SIZE ((K0) * (N0))

    // RHS offset and step X
#if defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (K0)
#define RHS_STEP_X ((K0) * (H0))
#define RHS_STEP_LOOP (1)
#else // defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (RHS_BLOCK_SIZE)
#define RHS_STEP_X (K0)
#define RHS_STEP_LOOP (H0)
#endif // defined(RHS_INTERLEAVE)

    uint x = get_global_id(0);
    uint y = get_global_id(1);
    uint z = get_global_id(2);

#if defined(DUMMY_WORK_ITEMS)
    if((x * N0 >= N) || (y * M0 >= M))
    {
        return;
    }
#endif // defined(DUMMY_WORK_ITEMS)

    // Compute LHS matrix address
    uint lhs_offset = lhs_offset_first_element_in_bytes + y * M0 * (uint)lhs_stride_y;

    // Compute RHS matrix address
    uint rhs_offset = rhs_offset_first_element_in_bytes + (x % H0) * (uint)RHS_OFFSET_X + (x / (uint)H0) * rhs_stride_y;

#if defined(MATRIX_B_DEPTH)
    // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
    rhs_offset += (z % MATRIX_B_DEPTH) * rhs_stride_z;
#else  // defined(MATRIX_B_DEPTH)
    rhs_offset += z * rhs_stride_z;
#endif // defined(MATRIX_B_DEPTH)

    REPEAT_VAR_INIT_TO_CONST(8, uint, zlhs, 0); //uint zlhs0=0,zlhs1=0,zlhs2=0,... zlhs7=0;
    REPEAT_VAR_INIT_TO_CONST(16, uint, zrhs, 0);

#if defined(REINTERPRET_INPUT_AS_3D)
    // The plane (zlhs) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
    CALCULATE_Z_OFFSET(M0, uint, zlhs, y, HEIGHT_GEMM3D, DEPTH_GEMM3D, lhs_cross_plane_pad, lhs_stride_y);

    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
    // multiply lhs_stride_z by DEPTH_GEMM3D
    lhs_offset += z * lhs_stride_z * DEPTH_GEMM3D;

#else // defined(REINTERPRET_INPUT_AS_3D)

    // Add offset for batched GEMM
    lhs_offset += z * lhs_stride_z;

#endif // defined(REINTERPRET_INPUT_AS_3D)

    // Initialize the accumulators
    REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(ACC_DATA_TYPE, N0), c, 0); //VEC_DATA_TYPE(ACC_DATA_TYPE, N0) c0=0,c1=0,c2=0,... c(M0-1)=0;

    for(int i = 0; i < K; i += K0)
    {
        // Load values from LHS matrix
        LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs);

        // Load values from RHS matrix
        LOAD_BLOCK(N0, K0, DATA_TYPE, b, rhs_ptr, rhs_offset, RHS_STEP_X, zrhs);

        // Partial matrix multiplication M0,N0,K0
        ARM_MM_K0XN0XM0(M0, N0, K0, a, b, c);

        lhs_offset += K0;
        rhs_offset += N0 * RHS_STEP_X * RHS_STEP_LOOP;
    }

    __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0) * sizeof(int) + (y * (uint)M0 * dst_stride_y);

    REPEAT_VAR_INIT_TO_CONST(8, uint, zout, 0); //uint zout0=0,zout1=0,zout2=0,... zout7=0;

#if defined(REINTERPRET_OUTPUT_AS_3D)
    // The plane (zout) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
    CALCULATE_Z_OFFSET(M0, uint, zout, y, HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y);

    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
    // multiply dst_stride_z by DEPTH_GEMM3D
    dst_addr += z * dst_stride_z * DEPTH_GEMM3D;

#else // defined(REINTERPRET_OUTPUT_AS_3D)

    // Add offset for batched GEMM
    dst_addr += z * dst_stride_z;

#endif // defined(REINTERPRET_OUTPUT_AS_3D)

    // Convert and store output block
    CONVERT_STORE_BLOCK(M0, N0, int, c, dst_addr, dst_stride_y, zout);

#undef RHS_BLOCK_SIZE
#undef RHS_OFFSET_X
#undef RHS_STEP_X
}
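
// Worked example (illustrative assumption): with K0 = 4, N0 = 4 and H0 = 2, each row of the
// reshaped RHS stores two K0xN0 blocks. Without -DRHS_INTERLEAVE the blocks are stored back to back:
//
//   RHS_OFFSET_X = RHS_BLOCK_SIZE = 16 // a work-item with (x % H0) == 1 skips one whole block
//   RHS_STEP_X   = K0             = 4  // consecutive b row fragments are K0 elements apart
//
// With -DRHS_INTERLEAVE the two blocks are interleaved K0 elements at a time, so
// RHS_OFFSET_X = K0 = 4 and RHS_STEP_X = K0 * H0 = 8.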

#if defined(RESULT_OFFSET) && defined(RESULT_SHIFT) && defined(RESULT_MULTIPLIER)
/** This OpenCL kernel computes the matrix multiplication between 2 matrices with fused output stage using fixed-point arithmetic.
 *  The LHS matrix is NOT reshaped
 *  The RHS matrix is reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the block K0xN0 is transposed
 *
 * @note The input data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=uchar)
 * @note The accumulator data type must be passed at compile time using -DACC_DATA_TYPE (i.e. -DACC_DATA_TYPE=uint)
 * @note The number of columns of LHS matrix must be passed at compile time using -DK (i.e. -DK=64)
 * @note The block's dimensions used for reshaping the RHS matrix (N0 and K0) must be passed at compile time using -DN0 and -DK0 (i.e. -DN0=8, -DK0=4).
 * @note The number of M0 rows to process must be passed at compile time using -DM0 (i.e. -DM0=2)
 * @note The number of K0xN0 horizontal blocks stored on the same output row of the reshaped RHS matrix must be passed at compile time using -DH0 (i.e. -DH0=2)
 * @note If the K0xN0 blocks in the reshaped RHS matrix have been interleaved, the option -DRHS_INTERLEAVE must be passed at compile time.
 * @note Only the following configurations of M0, N0 and K0 are currently supported:
 *  - M0 = 1, 2, 3, 4, 5, 6, 7, 8
 *  - N0 = 2, 3, 4, 8, 16
 *  - K0 = 2, 3, 4, 8, 16
 *  - H0 >= 1
 *
 * @note In case the input or output have to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
 *       -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
 *       -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
 *       -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
 *       -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
 *          (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns LHS matrix
 *
 * @note The offset, scalar scale factor and number of bits to shift right of output tensor must be passed at compile time using -DRESULT_OFFSET, -DRESULT_MULTIPLIER and -DRESULT_SHIFT
 * @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
 * @note The output datatype should be passed at compile time using -DOUTPUT_DATA_TYPE
 * @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
 *       These values can be used to implement "rectified linear unit" activation functions
 * @note In case of per-channel quantization of matrix B, -DPER_CHANNEL_QUANTIZATION must be passed at compile time.
 *
 * @param[in]  lhs_ptr Pointer to the LHS matrix. Supported data type: QASYMM8/QASYMM8_SIGNED
 * @param[in]  lhs_stride_x Stride of the LHS matrix in X dimension (in bytes)
 * @param[in]  lhs_step_x lhs_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  lhs_stride_y Stride of the LHS matrix in Y dimension (in bytes)
 * @param[in]  lhs_step_y lhs_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  lhs_offset_first_element_in_bytes The offset of the first element in the LHS matrix
 * @param[in]  rhs_ptr Pointer to the RHS reshaped matrix. Supported data type: same as @p lhs_ptr
 * @param[in]  rhs_stride_x Stride of the RHS reshaped matrix in X dimension (in bytes)
 * @param[in]  rhs_step_x rhs_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  rhs_stride_y Stride of the RHS reshaped matrix in Y dimension (in bytes)
 * @param[in]  rhs_step_y rhs_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  rhs_offset_first_element_in_bytes The offset of the first element in the RHS reshaped matrix
 * @param[out] dst_ptr Pointer to the destination matrix. Supported data type: same as @p lhs_ptr
 * @param[in]  dst_stride_x Stride of the destination matrix in X dimension (in bytes)
 * @param[in]  dst_step_x dst_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
 * @param[in]  dst_step_y dst_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
 * @param[in]  lhs_stride_z Stride of the LHS matrix in Z dimension (in bytes)
 * @param[in]  rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes)
 * @param[in]  dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
 * @param[in]  lhs_cross_plane_pad (Optional) Bottom paddings for LHS matrix in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
 * @param[in]  dst_cross_plane_pad (Optional) Bottom paddings for the output matrix in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
 * @param[in]  sum_col_ptr (Optional) Pointer to the source tensor. Supported data type: S32
 * @param[in]  sum_col_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
 * @param[in]  sum_col_step_x (Optional) sum_col_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  sum_col_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
 * @param[in]  sum_col_step_y (Optional) sum_col_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  sum_col_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
 * @param[in]  sum_row_ptr (Optional) Pointer to the source tensor. Supported data type: S32
 * @param[in]  sum_row_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
 * @param[in]  sum_row_step_x (Optional) sum_row_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  sum_row_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
 * @param[in]  sum_row_step_y (Optional) sum_row_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  sum_row_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
 * @param[in]  biases_ptr (Optional) Pointer to the biases tensor. Supported data type: S32
 * @param[in]  biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
 * @param[in]  biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
 * @param[in]  result_multipliers_ptr (Optional) Pointer to the output multipliers vector for per-channel quantization. Supported data types: S32
 * @param[in]  result_multipliers_stride_x (Optional) Stride of the output multipliers vector in X dimension (in bytes)
 * @param[in]  result_multipliers_step_x (Optional) result_multipliers_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  result_multipliers_offset_first_element_in_bytes (Optional) The offset of the first element in the output multipliers vector
 * @param[in]  result_shifts_ptr (Optional) Pointer to the output shifts vector for per-channel quantization. Supported data types: S32
 * @param[in]  result_shifts_stride_x (Optional) Stride of the output shifts vector in X dimension (in bytes)
 * @param[in]  result_shifts_step_x (Optional) result_shifts_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  result_shifts_offset_first_element_in_bytes (Optional) The offset of the first element in the output shifts vector
 */
__kernel void gemmlowp_mm_reshaped_only_rhs_t_fused_output_stage_fixedpoint(IMAGE_DECLARATION(lhs),
                                                                            IMAGE_DECLARATION(rhs),
                                                                            IMAGE_DECLARATION(dst),
                                                                            uint lhs_stride_z,
                                                                            uint rhs_stride_z,
                                                                            uint dst_stride_z
#if defined(REINTERPRET_INPUT_AS_3D)
                                                                            ,
                                                                            uint lhs_cross_plane_pad
#endif // REINTERPRET_INPUT_AS_3D
#if defined(REINTERPRET_OUTPUT_AS_3D)
                                                                            ,
                                                                            uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
#if defined(A_OFFSET)
                                                                            ,
                                                                            IMAGE_DECLARATION(sum_col)
#endif // defined(A_OFFSET)
#if defined(B_OFFSET)
                                                                            ,
                                                                            IMAGE_DECLARATION(sum_row)
#endif // defined(B_OFFSET)
#if defined(ADD_BIAS)
                                                                            ,
                                                                            VECTOR_DECLARATION(biases)
#endif // defined(ADD_BIAS)
#if defined(PER_CHANNEL_QUANTIZATION)
                                                                            ,
                                                                            VECTOR_DECLARATION(result_multipliers),
                                                                            VECTOR_DECLARATION(result_shifts)
#endif // defined(PER_CHANNEL_QUANTIZATION)
                                                                           )
{
    // Block size
#define RHS_BLOCK_SIZE ((K0) * (N0))

    // RHS offset and step X
#if defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (K0)
#define RHS_STEP_X ((K0) * (H0))
#define RHS_STEP_LOOP (1)
#else // defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (RHS_BLOCK_SIZE)
#define RHS_STEP_X (K0)
#define RHS_STEP_LOOP (H0)
#endif // defined(RHS_INTERLEAVE)

    uint x = get_global_id(0);
    uint y = get_global_id(1);
    uint z = get_global_id(2);

#if defined(DUMMY_WORK_ITEMS)
    if((x * N0 >= N) || (y * M0 >= M))
    {
        return;
    }
#endif // defined(DUMMY_WORK_ITEMS)

    // Compute LHS matrix address
    uint lhs_offset = lhs_offset_first_element_in_bytes + y * M0 * (uint)lhs_stride_y;

    // Compute RHS matrix address
    uint rhs_offset = rhs_offset_first_element_in_bytes + (x % H0) * (uint)RHS_OFFSET_X + (x / (uint)H0) * rhs_stride_y;

#if defined(MATRIX_B_DEPTH)
    // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
    rhs_offset += (z % MATRIX_B_DEPTH) * rhs_stride_z;
#else  // defined(MATRIX_B_DEPTH)
    rhs_offset += z * rhs_stride_z;
#endif // defined(MATRIX_B_DEPTH)

    REPEAT_VAR_INIT_TO_CONST(8, uint, zlhs, 0); //uint zlhs0=0,zlhs1=0,zlhs2=0,... zlhs7=0;
    REPEAT_VAR_INIT_TO_CONST(16, uint, zrhs, 0);

#if defined(REINTERPRET_INPUT_AS_3D)
    // The plane (zlhs) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
    CALCULATE_Z_OFFSET(M0, uint, zlhs, y, HEIGHT_GEMM3D, DEPTH_GEMM3D, lhs_cross_plane_pad, lhs_stride_y);

    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
    // multiply lhs_stride_z by DEPTH_GEMM3D
    lhs_offset += z * lhs_stride_z * DEPTH_GEMM3D;

#else // defined(REINTERPRET_INPUT_AS_3D)

    // Add offset for batched GEMM
    lhs_offset += z * lhs_stride_z;

#endif // defined(REINTERPRET_INPUT_AS_3D)

    // Initialize the accumulators
    REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(ACC_DATA_TYPE, N0), c, 0); //VEC_DATA_TYPE(ACC_DATA_TYPE, N0) c0=0,c1=0,c2=0,... c(M0-1)=0;

    for(int i = 0; i < K; i += K0)
    {
        // Load values from LHS matrix
        LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs);

        // Load values from RHS matrix
        LOAD_BLOCK(N0, K0, DATA_TYPE, b, rhs_ptr, rhs_offset, RHS_STEP_X, zrhs);

        // Partial matrix multiplication M0,N0,K0
        ARM_MM_K0XN0XM0(M0, N0, K0, a, b, c);

        lhs_offset += K0;
        rhs_offset += N0 * RHS_STEP_X * RHS_STEP_LOOP;
    }

    // Result of MM is of type DATA_TYPE
    __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0) * sizeof(DATA_TYPE) + (y * (uint)M0 * dst_stride_y);

    REPEAT_VAR_INIT_TO_CONST(8, uint, zout, 0); //uint zout0=0,zout1=0,zout2=0,... zout7=0;

#if defined(REINTERPRET_OUTPUT_AS_3D)
    // The plane (zout) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
    CALCULATE_Z_OFFSET(M0, uint, zout, y, HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y);

    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
    // multiply dst_stride_z by DEPTH_GEMM3D
    dst_addr += z * dst_stride_z * DEPTH_GEMM3D;

#else // defined(REINTERPRET_OUTPUT_AS_3D)

    // Add offset for batched GEMM
    dst_addr += z * dst_stride_z;

#endif // defined(REINTERPRET_OUTPUT_AS_3D)

    // Convert result of matrix multiplication to S32
    REPEAT_VAR_INIT_CONVERT_SAT(M0, VEC_DATA_TYPE(int, N0), c, c_int);

    // Offset contribution: c += (A_OFFSET * sum_col) + (B_OFFSET * sum_row) + K_OFFSET;
    REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(int, N0), offset_s32_, K_OFFSET);

#if defined(A_OFFSET)
    // Compute the offset contribution due to A_OFFSET
    __global uchar *sum_col_addr = sum_col_ptr + sum_col_offset_first_element_in_bytes + (x * (uint)N0) * sizeof(int);

#if defined(SUM_COL_HAS_BATCHES)
    sum_col_addr += z * sum_col_stride_y;
#endif // defined(SUM_COL_HAS_BATCHES)
    VEC_DATA_TYPE(int, N0)
    a_offset_s32 = VLOAD(N0)(0, (__global int *)sum_col_addr);
    a_offset_s32 *= (VEC_DATA_TYPE(int, N0))A_OFFSET;

    REPEAT_ADD_VECTOR_TO_VAR(M0, offset_s32_, a_offset_s32);
#endif // defined(A_OFFSET)

#if defined(B_OFFSET)
    // Compute the offset contribution due to B_OFFSET
    // Note: The sum_row tensor is generated through CLGEMMLowpMatrixAReductionKernel which
    // does not introduce paddings. For this reason it is safe to access the tensor in this manner
    // without considering that the coordinate "y" could come from an input 3D tensor
    __global uchar *sum_row_addr = sum_row_ptr + sum_row_offset_first_element_in_bytes + (y * (uint)M0) * sizeof(int) + z * sum_row_stride_y;

    LOAD_SCALAR_AS_VECTOR(M0, N0, int, b_offset_s32_, sum_row_addr, 0, sum_row_stride_x);

    REPEAT_MLA_VAR_WITH_CONST_VEC(M0, offset_s32_, b_offset_s32_, (VEC_DATA_TYPE(int, N0))B_OFFSET);
#endif // defined(B_OFFSET)

#if defined(ADD_BIAS)
    // Add bias
    __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + (x * (uint)N0) * sizeof(int);

    VEC_DATA_TYPE(int, N0)
    bias_values = VLOAD(N0)(0, (__global int *)bias_addr);
    REPEAT_ADD_VECTOR_TO_VAR(M0, offset_s32_, bias_values);
#endif // defined(ADD_BIAS)

    REPEAT_ADD_TWO_VARS(M0, c_int, offset_s32_);

    // Multiply by result_mult_int and shift
#if defined(PER_CHANNEL_QUANTIZATION)
    __global uchar *result_multipliers_addr = result_multipliers_ptr + result_multipliers_offset_first_element_in_bytes + (x * (uint)N0) * sizeof(int);
    __global uchar *result_shifts_addr      = result_shifts_ptr + result_shifts_offset_first_element_in_bytes + (x * (uint)N0) * sizeof(int);

    VEC_DATA_TYPE(int, N0)
    res_mul = VLOAD(N0)(0, (__global int *)result_multipliers_addr);
    VEC_DATA_TYPE(int, N0)
    res_shift = VLOAD(N0)(0, (__global int *)result_shifts_addr);

    REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_PER_CHANNEL(M0, N0, c_int, res_mul, res_shift);
#else // defined(PER_CHANNEL_QUANTIZATION)

#if RESULT_SHIFT < 0
    REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(M0, N0, c_int, RESULT_MULTIPLIER, RESULT_SHIFT);
#else // RESULT_SHIFT >= 0
    REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(M0, N0, c_int, RESULT_MULTIPLIER, RESULT_SHIFT);
#endif // RESULT_SHIFT < 0

#endif // defined(PER_CHANNEL_QUANTIZATION)

    // Add the offset terms to GEMM's result
    REPEAT_ADD_CONST_TO_VAR(M0, VEC_DATA_TYPE(int, N0), c_int, RESULT_OFFSET);

#if defined(MIN_BOUND)
    REPEAT_MAX_CONST_VAR(M0, VEC_DATA_TYPE(int, N0), c_int, MIN_BOUND);
#endif // defined(MIN_BOUND)
#if defined(MAX_BOUND)
    REPEAT_MIN_CONST_VAR(M0, VEC_DATA_TYPE(int, N0), c_int, MAX_BOUND);
#endif // defined(MAX_BOUND)

    // Convert and store output block (does convert saturate)
    CONVERT_STORE_BLOCK(M0, N0, DATA_TYPE, c_int, dst_addr, dst_stride_y, zout);

#undef RHS_BLOCK_SIZE
#undef RHS_OFFSET_X
#undef RHS_STEP_X
}
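
// Summary of the fused output stage above (informal sketch, for reference only): starting from the
// int32 accumulators c_int of the raw GEMM, each output element is computed roughly as
//
//   acc = c_int + K_OFFSET + A_OFFSET * sum_col[n] + B_OFFSET * sum_row[m] + bias[n];
//   out = fixed_point_scale(acc, RESULT_MULTIPLIER, RESULT_SHIFT) + RESULT_OFFSET;
//   out = clamp(out, MIN_BOUND, MAX_BOUND); // only when the bounds are defined
//
// where fixed_point_scale is a placeholder name for the fixed-point rescaling performed by the
// ASYMM_MULT_BY_QUANT_MULTIPLIER_* helpers, the optional terms only appear when the matching -D
// options are passed, and the result is finally converted (with saturation) to DATA_TYPE.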
916#endif // defined(RESULT_OFFSET) && defined(RESULT_SHIFT) && defined(RESULT_MULTIPLIER)
Gian Marco Iodice62251f72019-03-11 16:07:12 +0000917#endif // defined(M0) && defined(N0) && defined(K0) && defined(H0) && defined(DATA_TYPE) && defined(K)
918
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100919#if defined(M0) && defined(N0) && defined(K0) && defined(K)
920
921/** This OpenCL kernel computes the matrix multiplication between 2 matrices.
922 * The LHS matrix is NOT reshaped
923 * The RHS matrix is NOT reshaped
924 *
Michele Di Giorgiof9179d32019-11-27 16:17:30 +0000925 * @note The input data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=uchar)
926 * @note The accumulator data type must be passed at compile time using -DACC_DATA_TYPE (i.e. -DACC_DATA_TYPE=uint)
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100927 * @note The number of columns of LHS matrix must be passed at compile time using -DK (i.e. -DK=64)
928 * @note The number of M0 rows to process must be passed at compile time using -DM0 (i.e. -DM0=2)
929 * @note The number of N0 columns to process must be passed at compile time using -DN0 (i.e. -DN0=2)
930 * @note The number of K0 partial accumulations must be passed at compile time using -DK0 (i.e., -DK0=2)
931 * @note Only the following configurations of M0, N0 and K0 are currently supported:
932 * - M0 = 1, 2, 3, 4, 5, 6, 7, 8
933 * - N0 = 2, 3, 4, 8, 16
934 * - K0 = 2, 3, 4, 8, 16
935 *
936 * @note In case the input or output have to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
937 * -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
938 * -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
939 * -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
940 * -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
941 * (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns LHS matrix
942 *
Michele Di Giorgiob54ba282020-01-14 15:31:55 +0000943 * @param[in] lhs_ptr Pointer to the LHS reshaped matrix. Supported data type: QASYMM8
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100944 * @param[in] lhs_stride_x Stride of the LHS reshaped matrix in X dimension (in bytes)
945 * @param[in] lhs_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
946 * @param[in] lhs_stride_y Stride of the LHS reshaped matrix in Y dimension (in bytes)
947 * @param[in] lhs_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
948 * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the LHS reshaped matrix
949 * @param[in] rhs_ptr Pointer to the RHS reshaped matrix. Supported data type: same as @p lhs_ptr
950 * @param[in] rhs_stride_x Stride of the RHS reshaped matrix in X dimension (in bytes)
951 * @param[in] rhs_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
952 * @param[in] rhs_stride_y Stride of the RHS reshaped matrix in Y dimension (in bytes)
953 * @param[in] rhs_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
954 * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the RHS reshaped matrix
Michele Di Giorgiob54ba282020-01-14 15:31:55 +0000955 * @param[out] dst_ptr Pointer to the destination matrix Supported data type: S32
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100956 * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
957 * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
958 * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
959 * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
960 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
961 * @param[in] lhs_stride_z Stride of the LHS reshaped matrix in Z dimension (in bytes)
962 * @param[in] rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes)
963 * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
964 * @param[in] lhs_cross_plane_pad (Optional) Bottom paddings for LHS matrix in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
965 * @param[in] dst_cross_plane_pad (Optional) Bottom paddings for the output matrix in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
966 */
967__kernel void gemmlowp_mm_native(IMAGE_DECLARATION(lhs),
968 IMAGE_DECLARATION(rhs),
969 IMAGE_DECLARATION(dst),
970 uint lhs_stride_z,
971 uint rhs_stride_z,
972 uint dst_stride_z
973#if defined(REINTERPRET_INPUT_AS_3D)
974 ,
975 uint lhs_cross_plane_pad
976#endif // REINTERPRET_INPUT_AS_3D
977#if defined(REINTERPRET_OUTPUT_AS_3D)
978 ,
979 uint dst_cross_plane_pad
980#endif // REINTERPRET_OUTPUT_AS_3D
981 )
982{
983 uint x = get_global_id(0);
984 uint y = get_global_id(1);
985 uint z = get_global_id(2);
986
987#if defined(DUMMY_WORK_ITEMS)
988 if((x * N0 >= N) || (y * M0 >= M))
989 {
990 return;
991 }
992#endif // defined(DUMMY_WORK_ITEMS)
993
994 // Compute LHS matrix address
morgolockcf343e32020-10-12 14:00:43 +0100995 uint lhs_offset = lhs_offset_first_element_in_bytes + COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * (uint)lhs_stride_y;
Gian Marco Iodicee7510622019-06-03 17:28:17 +0100996
997 // Compute RHS matrix address
morgolockcf343e32020-10-12 14:00:43 +0100998 uint rhs_offset = rhs_offset_first_element_in_bytes + x * N0 * sizeof(DATA_TYPE);
999
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001000
1001#if defined(MATRIX_B_DEPTH)
1002 // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
1003 rhs_offset += (z % MATRIX_B_DEPTH) * rhs_stride_z;
1004#else // defined(MATRIX_B_DEPTH)
1005 rhs_offset += z * rhs_stride_z;
1006#endif // defined(MATRIX_B_DEPTH)
1007
1008 REPEAT_VAR_INIT_TO_CONST(8, uint, zlhs, 0);
1009 REPEAT_VAR_INIT_TO_CONST(16, uint, zrhs, 0);
1010
1011#if defined(REINTERPRET_INPUT_AS_3D)
1012 // The plane (zlhs) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
1013 CALCULATE_Z_OFFSET(M0, uint, zlhs, y, HEIGHT_GEMM3D, DEPTH_GEMM3D, lhs_cross_plane_pad, lhs_stride_y);
1014
1015 // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
1016 // multiply lhs_stride_z by DEPTH_GEMM3D
1017 lhs_offset += z * lhs_stride_z * DEPTH_GEMM3D;
1018
1019#else // defined(REINTERPRET_INPUT_AS_3D)
1020
1021 // Add offset for batched GEMM
1022 lhs_offset += z * lhs_stride_z;
1023
1024#endif // defined(REINTERPRET_INPUT_AS_3D)
1025
1026 // Initialize the accumulators
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001027 REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(ACC_DATA_TYPE, N0), c, 0); //VEC_DATA_TYPE(ACC_DATA_TYPE, N0) c0=0,c1=0,c2=0,... c(M0-1)=0;
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001028
1029 int i = 0;
1030
1031 for(; i <= (K - K0); i += K0)
1032 {
1033 // Load values from LHS matrix
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001034 LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs);
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001035
1036 // Load values from RHS matrix
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001037 LOAD_BLOCK(K0, N0, DATA_TYPE, b, rhs_ptr, rhs_offset, rhs_stride_y, zrhs);
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001038
SiCong Li738893e2020-05-01 12:55:16 +01001039 // Partial matrix multiplication M0,N0,K0
1040#if(GPU_ARCH == GPU_ARCH_MIDGARD)
1041 ARM_MM_NATIVE_N0XK0XM0(VEC_DATA_TYPE(ACC_DATA_TYPE, N0), M0, K0, a, b, c);
1042#else // GPU_ARCH == GPU_ARCH_MIDGARD
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001043 // Transpose the values from RHS matrix
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001044 TRANSPOSE_K0XN0(K0, N0, b_t, b, DATA_TYPE);
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001045
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001046 ARM_MM_K0XN0XM0(M0, N0, K0, a, b_t, c);
SiCong Li738893e2020-05-01 12:55:16 +01001047#endif // GPU_ARCH == GPU_ARCH_MIDGARD
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001048
1049 // Update the offset
1050 lhs_offset += K0;
1051 rhs_offset += K0 * rhs_stride_y;
1052 }
1053
1054 // Left-over for loop
1055 for(; i < K; ++i)
1056 {
1057 // Load values from LHS matrix
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001058 LOAD_BLOCK(M0, 1, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs);
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001059
1060 // Load values from RHS matrix
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001061 LOAD_BLOCK(1, N0, DATA_TYPE, b, rhs_ptr, rhs_offset, rhs_stride_y, zrhs);
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001062
SiCong Li738893e2020-05-01 12:55:16 +01001063 // Partial matrix multiplication M0,N0,1
1064#if(GPU_ARCH == GPU_ARCH_MIDGARD)
1065 ARM_MM_NATIVE_N0XK0XM0(VEC_DATA_TYPE(ACC_DATA_TYPE, N0), M0, 1, a, b, c);
1066#else // GPU_ARCH == GPU_ARCH_MIDGARD
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001067 // Transpose the values from RHS matrix
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001068 TRANSPOSE_K0XN0(1, N0, b_t, b, DATA_TYPE);
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001069
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001070 ARM_MM_K0XN0XM0(M0, N0, 1, a, b_t, c);
SiCong Li738893e2020-05-01 12:55:16 +01001071#endif // GPU_ARCH == GPU_ARCH_MIDGARD
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001072
1073 // Update the offset
1074 lhs_offset += 1;
1075 rhs_offset += rhs_stride_y;
1076 }
1077
morgolockcf343e32020-10-12 14:00:43 +01001078 __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(int)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * dst_stride_y);
1079
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001080
1081    REPEAT_VAR_INIT_TO_CONST(M0, uint, zout, 0); //uint zout0=0,zout1=0,zout2=0,... zout(M0-1)=0;
1082
1083#if defined(REINTERPRET_OUTPUT_AS_3D)
1084    // The plane (zout) is calculated by dividing M (y * M0) by HEIGHT_GEMM3D
1085 CALCULATE_Z_OFFSET(M0, uint, zout, y, HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y);
1086
1087 // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
1088 // multiply dst_stride_z by DEPTH_GEMM3D
1089 dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
1090
1091#else // defined(REINTERPRET_OUTPUT_AS_3D)
1092
1093 // Add offset for batched GEMM
1094 dst_addr += z * dst_stride_z;
1095
1096#endif // defined(REINTERPRET_OUTPUT_AS_3D)
morgolockcf343e32020-10-12 14:00:43 +01001097 const bool cond_y = y == 0;
1098 const bool cond_x = ((x + 1) * N0 >= N);
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001099
morgolockcf343e32020-10-12 14:00:43 +01001100
1101 // Store output block
1102 STORE_BLOCK_BOUNDARY_AWARE(M0, N0, int, c, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, N, cond_y, cond_x);
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001103}
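/* Example build options for this kernel (illustrative only; the actual values are chosen by
 * the host-side configuration, the ones below are hypothetical):
 *   -DDATA_TYPE=uchar -DACC_DATA_TYPE=uint
 *   -DM0=4 -DN0=4 -DK0=16 -DM=<LHS rows> -DN=<RHS columns> -DK=<LHS columns>
 *   -DPARTIAL_STORE_M0=<size of the partial block in M> -DPARTIAL_STORE_N0=<size of the partial block in N>
 */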
1104#endif // defined(M0) && defined(N0) && defined(K0) && defined(K)
1105
Gian Marco05288a22017-11-21 10:57:50 +00001106#if defined(COLS_A)
1107/** OpenCL kernel used to compute the row-vectors of sums of all the entries in each row of Matrix A.
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001108 * It is also possible to multiply each reduced row by a scalar value, if SCALAR is passed at compile time.
Gian Marco05288a22017-11-21 10:57:50 +00001109 *
1110 * @note This stage is needed to handle the offset of matrix product
1111 * https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
1112 *
1113 * @attention The number of matrix A columns needs to be passed at compile time using -DCOLS_A
Manuel Bottini959c26d2019-12-02 16:22:35 +00001114 * @note The input data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=uchar)
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001115 * @note The data type for the accumulation must be passed at compile time using -DACC_DATA_TYPE (i.e. -DACC_DATA_TYPE=uint)
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001116 * @note In case of scaling the scalar value must be passed at compile time using -DSCALAR (e.g. -DSCALAR=3)
Gian Marco05288a22017-11-21 10:57:50 +00001117 *
Michele Di Giorgiof6f78762020-07-06 11:27:21 +01001118 * @param[in] src_ptr Pointer to the source tensor. Supported data type: QASYMM8/QASYMM8_SIGNED/QSYMM8
Gian Marco05288a22017-11-21 10:57:50 +00001119 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
1120 * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
1121 * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
1122 * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
1123 * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
1124 * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
1125 * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
1126 * @param[out] dst_ptr                            Pointer to the destination tensor. Supported data type: S32
1127 * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
1128 * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
1129 * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
1130 * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
1131 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
1132 */
1133__kernel void gemmlowp_matrix_a_reduction(TENSOR3D_DECLARATION(src),
1134 IMAGE_DECLARATION(dst))
1135{
1136 // Compute source and destination addresses
1137 Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
1138 Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
1139
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001140 VEC_DATA_TYPE(ACC_DATA_TYPE, 4)
1141 sum_row_32 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))0;
1142 ACC_DATA_TYPE sum_row = 0;
Gian Marco05288a22017-11-21 10:57:50 +00001143
Manuel Bottini959c26d2019-12-02 16:22:35 +00001144 __global const DATA_TYPE *matrix_a = (__global const DATA_TYPE *)(src.ptr + get_global_id(0) * src_stride_y + get_global_id(1) * src_stride_z);
Gian Marco05288a22017-11-21 10:57:50 +00001145
1146 int i = 0;
1147
1148    // This for loop performs 16 accumulations per iteration
1149 for(; i <= ((int)COLS_A - 16); i += 16)
1150 {
Manuel Bottini959c26d2019-12-02 16:22:35 +00001151 const VEC_DATA_TYPE(DATA_TYPE, 16) a0 = vload16(0, matrix_a + i);
Gian Marco05288a22017-11-21 10:57:50 +00001152
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001153 sum_row_32 += CONVERT(a0.s0123, VEC_DATA_TYPE(ACC_DATA_TYPE, 4)) + CONVERT(a0.s4567, VEC_DATA_TYPE(ACC_DATA_TYPE, 4)) + CONVERT(a0.s89AB, VEC_DATA_TYPE(ACC_DATA_TYPE, 4)) + CONVERT(a0.sCDEF,
1154 VEC_DATA_TYPE(ACC_DATA_TYPE, 4));
Gian Marco05288a22017-11-21 10:57:50 +00001155 }
1156
1157 // This for loop performs the leftover accumulations
1158 for(; i < COLS_A; ++i)
1159 {
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001160 sum_row += (ACC_DATA_TYPE)matrix_a[i];
Gian Marco05288a22017-11-21 10:57:50 +00001161 }
1162
Manuel Bottini959c26d2019-12-02 16:22:35 +00001163 sum_row += sum_row_32.s0 + sum_row_32.s1 + sum_row_32.s2 + sum_row_32.s3;
Gian Marco05288a22017-11-21 10:57:50 +00001164
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001165#if defined(SCALAR)
1166 sum_row *= (int)SCALAR;
1167#endif // defined(SCALAR)
Gian Marco05288a22017-11-21 10:57:50 +00001168 *((__global int *)dst.ptr) = (int)sum_row;
1169}
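/* Minimal scalar sketch of the reduction above (illustrative only, not used by any kernel):
 * each work-item produces sum_row[r] = SCALAR * sum_k A[r][k]. The helper name and the
 * row pointer/column count parameters below are hypothetical. */
inline int gemmlowp_row_sum_reference(__global const DATA_TYPE *row, int cols, int scalar)
{
    ACC_DATA_TYPE sum = 0;
    for(int k = 0; k < cols; ++k)
    {
        // Accumulate one element of the row in the wider accumulation type
        sum += (ACC_DATA_TYPE)row[k];
    }
    // Optional scaling, mirroring the -DSCALAR build option
    return (int)sum * scalar;
}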
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001170
1171#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001172/** OpenCL kernel used to compute the row-vectors of sums of all the entries in each row of Matrix A using the arm dot product instruction.
1173 * It is also possible to multiply each reduced row by a scalar value, if SCALAR is passed at compile time.
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001174 *
1175 * @note This stage is needed to handle the offset of matrix product
1176 * https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
1177 *
1178 * @attention The number of matrix A columns needs to be passed at compile time using -DCOLS_A
Manuel Bottini959c26d2019-12-02 16:22:35 +00001179 * @note The input data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=uchar)
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001180 * @note The data type for the accumulation must be passed at compile time using -DACC_DATA_TYPE (i.e. -DACC_DATA_TYPE=uint)
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001181 * @note In case of scaling the scalar value must be passed at compile time using -DSCALAR (e.g. -DSCALAR=3)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001182 *
Michele Di Giorgiof6f78762020-07-06 11:27:21 +01001183 * @param[in] src_ptr Pointer to the source tensor. Supported data type: QASYMM8/QASYMM8_SIGNED/QSYMM8
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001184 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
1185 * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
1186 * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
1187 * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
1188 * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
1189 * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
1190 * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
1191 * @param[out] dst_ptr                            Pointer to the destination tensor. Supported data type: S32
1192 * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
1193 * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
1194 * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
1195 * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
1196 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
1197 */
1198__kernel void gemmlowp_matrix_a_reduction_dot8(TENSOR3D_DECLARATION(src),
1199 IMAGE_DECLARATION(dst))
1200{
1201 // Compute source and destination addresses
1202 Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
1203 Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
1204
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001205 ACC_DATA_TYPE sum_row = 0;
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001206
Manuel Bottini959c26d2019-12-02 16:22:35 +00001207 __global const DATA_TYPE *matrix_a = (__global const DATA_TYPE *)(src.ptr + get_global_id(0) * src_stride_y + get_global_id(1) * src_stride_z);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001208
1209 int i = 0;
1210
1211    // This for loop performs 32 accumulations per iteration
1212 for(; i <= ((int)COLS_A - 32); i += 32)
1213 {
Manuel Bottini959c26d2019-12-02 16:22:35 +00001214 VEC_DATA_TYPE(DATA_TYPE, 16)
1215 a0 = vload16(0, matrix_a + i);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001216
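        // Dotting the loaded values with a vector of ones accumulates four elements per arm_dot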
Manuel Bottini959c26d2019-12-02 16:22:35 +00001217 sum_row += arm_dot(a0.s0123, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
1218 sum_row += arm_dot(a0.s4567, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
1219 sum_row += arm_dot(a0.s89AB, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
1220 sum_row += arm_dot(a0.sCDEF, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001221
Manuel Bottini959c26d2019-12-02 16:22:35 +00001222 a0 = vload16(1, matrix_a + i);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001223
Manuel Bottini959c26d2019-12-02 16:22:35 +00001224 sum_row += arm_dot(a0.s0123, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
1225 sum_row += arm_dot(a0.s4567, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
1226 sum_row += arm_dot(a0.s89AB, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
1227 sum_row += arm_dot(a0.sCDEF, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001228 }
1229
1230 // This for loop performs the leftover accumulations
1231 for(; i < COLS_A; ++i)
1232 {
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001233 sum_row += (ACC_DATA_TYPE)matrix_a[i];
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001234 }
1235
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001236#if defined(SCALAR)
1237 sum_row *= (int)SCALAR;
1238#endif // defined(SCALAR)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001239 *((__global int *)dst.ptr) = (int)sum_row;
1240}
1241#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
Gian Marco05288a22017-11-21 10:57:50 +00001242#endif // defined(COLS_A)
1243
1244#if defined(COLS_B) && defined(ROWS_B)
1245/** OpenCL kernel used to compute the row-vectors of sums of all the entries in each column of Matrix B.
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001246 * It is also possible to multiply each reduced column by a scalar value, if SCALAR is passed at compile time.
Gian Marco05288a22017-11-21 10:57:50 +00001247 *
1248 * @note This stage is needed to handle the offset of matrix product
1249 * https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
1250 *
1251 * @attention The number of matrix B columns and rows needs to be passed at compile time using -DCOLS_B and -DROWS_B
Manuel Bottini959c26d2019-12-02 16:22:35 +00001252 * @note The input data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=uchar)
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001253 * @note The data type for the accumulation must be passed at compile time using -DACC_DATA_TYPE (i.e. -DACC_DATA_TYPE=uint)
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001254 * @note In case of scaling the scalar value must be passed at compile time using -DSCALAR (i.e. -DSCALAR=3)
Gian Marco05288a22017-11-21 10:57:50 +00001255 *
Michele Di Giorgiof6f78762020-07-06 11:27:21 +01001256 * @param[in] src_ptr Pointer to the source tensor. Supported data type: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL
Gian Marco05288a22017-11-21 10:57:50 +00001257 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
1258 * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
1259 * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
1260 * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
1261 * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
1262 * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
1263 * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
1264 * @param[out] dst_ptr                            Pointer to the destination tensor. Supported data type: S32
1265 * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
1266 * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
1267 * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
1268 * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
1269 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
1270 */
1271__kernel void gemmlowp_matrix_b_reduction(TENSOR3D_DECLARATION(src),
1272 IMAGE_DECLARATION(dst))
1273{
1274 // Compute source and destination addresses
1275 Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
1276 Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
1277
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001278 VEC_DATA_TYPE(ACC_DATA_TYPE, 16)
1279 sum_col_32 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 16))0;
Gian Marco05288a22017-11-21 10:57:50 +00001280
Manuel Bottini959c26d2019-12-02 16:22:35 +00001281 __global const DATA_TYPE *matrix_b = (__global const DATA_TYPE *)(src.ptr + get_global_id(1) * src_stride_z);
Gian Marco05288a22017-11-21 10:57:50 +00001282
1283 int i = 0;
1284    // This for loop accumulates 4 rows per iteration
1285 for(; i <= ((int)ROWS_B - 4); i += 4)
1286 {
Manuel Bottini959c26d2019-12-02 16:22:35 +00001287 const VEC_DATA_TYPE(DATA_TYPE, 16)
1288 b0 = vload16(0, matrix_b + 0 * src_stride_y);
1289 const VEC_DATA_TYPE(DATA_TYPE, 16)
1290 b1 = vload16(0, matrix_b + 1 * src_stride_y);
1291 const VEC_DATA_TYPE(DATA_TYPE, 16)
1292 b2 = vload16(0, matrix_b + 2 * src_stride_y);
1293 const VEC_DATA_TYPE(DATA_TYPE, 16)
1294 b3 = vload16(0, matrix_b + 3 * src_stride_y);
Gian Marco05288a22017-11-21 10:57:50 +00001295
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001296 sum_col_32 += CONVERT(b0, VEC_DATA_TYPE(ACC_DATA_TYPE, 16)) + CONVERT(b1, VEC_DATA_TYPE(ACC_DATA_TYPE, 16)) + CONVERT(b2, VEC_DATA_TYPE(ACC_DATA_TYPE, 16)) + CONVERT(b3, VEC_DATA_TYPE(ACC_DATA_TYPE,
Manuel Bottini959c26d2019-12-02 16:22:35 +00001297 16));
Gian Marco05288a22017-11-21 10:57:50 +00001298
1299 matrix_b += 4 * src_stride_y;
1300 }
1301
1302    // This for loop performs the leftover accumulations
1303 for(; i < (int)ROWS_B; ++i)
1304 {
Manuel Bottini959c26d2019-12-02 16:22:35 +00001305 const VEC_DATA_TYPE(DATA_TYPE, 16)
1306 b0 = vload16(0, matrix_b);
Gian Marco05288a22017-11-21 10:57:50 +00001307
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001308 sum_col_32 += CONVERT(b0, VEC_DATA_TYPE(ACC_DATA_TYPE, 16));
Gian Marco05288a22017-11-21 10:57:50 +00001309
1310 matrix_b += src_stride_y;
1311 }
1312
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001313#if defined(SCALAR)
1314 sum_col_32 *= (VEC_DATA_TYPE(ACC_DATA_TYPE, 16))SCALAR;
1315#endif // defined(SCALAR)
1316 VSTORE(16)
Gian Marco Iodice19fe0a92020-04-14 14:43:03 +01001317 (convert_int16(sum_col_32), 0, (__global int *)dst.ptr);
Gian Marco05288a22017-11-21 10:57:50 +00001318}
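/* In short: each work-item above produces 16 consecutive column sums,
 * sum_col[c] = sum_r B[r][c], optionally scaled by SCALAR, mirroring the row reduction
 * performed on matrix A. */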
1319#endif // defined(COLS_B) && defined(ROWS_B)
1320
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001321#endif // defined(DATA_TYPE) && defined(ACC_DATA_TYPE)
1322
Gian Marco05288a22017-11-21 10:57:50 +00001323#if defined(K_OFFSET)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001324
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001325/* Helper function used to calculate the offset contribution after matrix multiplication.
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001326 *
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001327 * This function takes a final int32 accumulator value (the output of matrix multiplication),
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001328 * and calculates the offset contribution of matrix A and matrix B.
1329 *
1330 * @attention The k_offset = a_offset * b_offset * k (where k is the number of matrix A columns) needs to be passed at compile time using -DK_OFFSET (i.e. -DK_OFFSET=1200)
1331 * @note In case the offset contribution due to a_offset is required, a_offset needs to be passed at compile time using -DA_OFFSET (i.e. -DA_OFFSET=1)
1332 * @note In case the offset contribution due to b_offset is required, b_offset needs to be passed at compile time using -DB_OFFSET (i.e. -DB_OFFSET=6)
1333 * @note In case sum_col has batches, -DSUM_COL_HAS_BATCHES must be passed at compile time. Usually if gemmlowp is used to accelerate convolution layer, sum_col will not have batches
1334 *
1335 * @param[in] x get_global_id(0) * 4
1336 * @param[in] y get_global_id(1)
1337 * @param[in] z get_global_id(2)
1338 * @param[in] sum_col_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1339 * @param[in] sum_col_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1340 * @param[in] sum_col_step_x (Optional) sum_col_stride_x * number of elements along X processed per workitem(in bytes)
1341 * @param[in] sum_col_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1342 * @param[in] sum_col_step_y (Optional) sum_col_stride_y * number of elements along Y processed per workitem(in bytes)
1343 * @param[in] sum_col_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
1344 * @param[in] sum_row_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1345 * @param[in] sum_row_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1346 * @param[in] sum_row_step_x (Optional) sum_row_stride_x * number of elements along X processed per workitem(in bytes)
1347 * @param[in] sum_row_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1348 * @param[in] sum_row_step_y (Optional) sum_row_stride_y * number of elements along Y processed per workitem(in bytes)
1349 * @param[in] sum_row_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
1350 * @param[in] biases_ptr (Optional) Pointer to the biases tensor. Supported data type: same as @p src_ptr
1351 * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
1352 * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
1353 * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
1354 */
1355inline int4 offset_contribution(
1356 int x,
1357 int y,
1358 int z
1359#if defined(A_OFFSET)
1360 ,
1361 IMAGE_DECLARATION(sum_col)
1362#endif // defined(A_OFFSET)
1363#if defined(B_OFFSET)
1364 ,
1365 IMAGE_DECLARATION(sum_row)
1366#endif // defined(B_OFFSET)
1367#if defined(ADD_BIAS)
1368 ,
1369 VECTOR_DECLARATION(biases)
1370#endif // defined(ADD_BIAS)
1371)
1372{
1373 int4 a_offset_s32 = (int4)0;
1374 int4 b_offset_s32 = (int4)0;
1375
1376 int batch_id = z;
1377#if defined(DEPTH_INPUT3D)
1378 batch_id /= (int)DEPTH_INPUT3D;
1379#endif // defined(DEPTH_INPUT3D)
1380
1381#if defined(A_OFFSET)
1382    // Compute the address of the column-sum vector (sum_col)
1383 __global uchar *sum_col_addr = sum_col_ptr + sum_col_offset_first_element_in_bytes + x * sizeof(int);
1384
1385 // Compute the offset contribution due to A_OFFSET
1386#if defined(SUM_COL_HAS_BATCHES)
1387 a_offset_s32 = vload4(0, (__global int *)(sum_col_addr + batch_id * sum_col_stride_y));
1388#else // defined(SUM_COL_HAS_BATCHES)
1389 a_offset_s32 = vload4(0, (__global int *)sum_col_addr);
1390#endif // defined(SUM_COL_HAS_BATCHES)
1391
1392 a_offset_s32 *= (int4)A_OFFSET;
1393#endif // defined(A_OFFSET)
1394
1395#if defined(B_OFFSET)
1396    // Compute the address of the row-sum vector (sum_row)
1397 __global uchar *sum_row_addr = sum_row_ptr + sum_row_offset_first_element_in_bytes + y * sizeof(int);
1398
1399 // Compute the offset contribution due to B_OFFSET
1400#if defined(HEIGHT_INPUT3D) && defined(DEPTH_INPUT3D)
1401 b_offset_s32 = (int4) * (((__global int *)(sum_row_addr + batch_id * sum_row_stride_y)) + (z % (int)DEPTH_INPUT3D) * (int)HEIGHT_INPUT3D);
1402#else // defined(HEIGHT_INPUT3D) && defined(DEPTH_INPUT3D)
1403 b_offset_s32 = (int4) * (((__global int *)(sum_row_addr + batch_id * sum_row_stride_y)));
1404#endif // defined(HEIGHT_INPUT3D) && defined(DEPTH_INPUT3D)
1405 b_offset_s32 *= (int4)B_OFFSET;
1406#endif // defined(B_OFFSET)
1407
1408#if defined(ADD_BIAS)
1409 // Add bias
1410 __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(int);
1411
1412 int4 biases_values = vload4(0, (__global int *)bias_addr);
1413 b_offset_s32 += (int4)biases_values;
1414#endif // defined(ADD_BIAS)
1415
1416 return (int4)K_OFFSET + a_offset_s32 + b_offset_s32;
1417}
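/* Scalar sketch of the correction computed with offset_contribution() (illustrative only;
 * the helper name and parameters below are hypothetical). It follows the documented formula
 * mm_result[i][k] + sum_col[k] * A_OFFSET + sum_row[i] * B_OFFSET + K_OFFSET, with
 * K_OFFSET = a_offset * b_offset * k. */
inline int offset_contribution_reference(int mm_result, int sum_col_k, int sum_row_i, int a_offset, int b_offset, int k)
{
    // k_offset corresponds to the -DK_OFFSET build option
    const int k_offset = a_offset * b_offset * k;
    return mm_result + (sum_col_k * a_offset) + (sum_row_i * b_offset) + k_offset;
}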
1418
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001419/* OpenCL kernel used to add the offset contribution after matrix multiplication. The computation is performed in-place
Gian Marco05288a22017-11-21 10:57:50 +00001420 *
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001421 * This kernel takes a final int32 accumulator value (the output of matrix multiplication),
Gian Marco05288a22017-11-21 10:57:50 +00001422 * and adds to it the offset contribution of matrix A and matrix B in-place.
1423 *
1424 * @attention The k_offset = a_offset * b_offset * k (where k is the number of matrix A columns) needs to be passed at compile time using -DK_OFFSET (i.e. -DK_OFFSET=1200)
1425 * @note In case the offset contribution due to a_offset is required, a_offset needs to be passed at compile time using -DA_OFFSET (i.e. -DA_OFFSET=1)
1426 * @note In case the offset contribution due to b_offset is required, b_offset needs to be passed at compile time using -DB_OFFSET (i.e. -DB_OFFSET=6)
Chunosov5124be52017-11-22 20:42:13 +07001427 * @note In case sum_col has batches, -DSUM_COL_HAS_BATCHES must be passed at compile time. Usually if gemmlowp is used to accelerate convolution layer, sum_col will not have batches
Gian Marco05288a22017-11-21 10:57:50 +00001428 *
1429 * The final result is:
1430 *
1431 * mm_result[i][k] = mm_result[i][k] +
1432 * (sum_col[k] * A_OFFSET) +
1433 * (sum_row[i] * B_OFFSET) +
1434 * (K_OFFSET)
1435 *
Georgios Pinitasebf6b8a2018-09-24 16:31:08 +01001436 * @param[in] mm_result_ptr Pointer to the source tensor. Supported data type: S32
1437 * @param[in] mm_result_stride_x Stride of the source tensor in X dimension (in bytes)
1438 * @param[in] mm_result_step_x mm_result_stride_x * number of elements along X processed per workitem(in bytes)
1439 * @param[in] mm_result_stride_y Stride of the source tensor in Y dimension (in bytes)
1440 * @param[in] mm_result_step_y mm_result_stride_y * number of elements along Y processed per workitem(in bytes)
1441 * @param[in] mm_result_stride_z Stride of the source tensor in Z dimension (in bytes)
1442 * @param[in] mm_result_step_z mm_result_stride_z * number of elements along Z processed per workitem(in bytes)
1443 * @param[in] mm_result_offset_first_element_in_bytes The offset of the first element in the source tensor
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001444 * @param[in] sum_col_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1445 * @param[in] sum_col_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1446 * @param[in] sum_col_step_x (Optional) sum_col_stride_x * number of elements along X processed per workitem(in bytes)
1447 * @param[in] sum_col_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1448 * @param[in] sum_col_step_y (Optional) sum_col_stride_y * number of elements along Y processed per workitem(in bytes)
1449 * @param[in] sum_col_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
1450 * @param[in] sum_row_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1451 * @param[in] sum_row_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1452 * @param[in] sum_row_step_x (Optional) sum_row_stride_x * number of elements along X processed per workitem(in bytes)
1453 * @param[in] sum_row_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1454 * @param[in] sum_row_step_y (Optional) sum_row_stride_y * number of elements along Y processed per workitem(in bytes)
1455 * @param[in] sum_row_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
1456 * @param[in] biases_ptr (Optional) Pointer to the biases tensor. Supported data type: same as @p src_ptr
1457 * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
1458 * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
1459 * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
Gian Marco05288a22017-11-21 10:57:50 +00001460 */
1461__kernel void gemmlowp_offset_contribution(TENSOR3D_DECLARATION(mm_result)
1462#if defined(A_OFFSET)
1463 ,
1464 IMAGE_DECLARATION(sum_col)
1465#endif // defined(A_OFFSET)
1466#if defined(B_OFFSET)
1467 ,
1468 IMAGE_DECLARATION(sum_row)
1469#endif // defined(B_OFFSET)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001470#if defined(ADD_BIAS)
1471 ,
1472 VECTOR_DECLARATION(biases)
1473#endif // defined(ADD_BIAS)
Gian Marco05288a22017-11-21 10:57:50 +00001474 )
1475{
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001476 const int x = get_global_id(0) * 4;
Georgios Pinitasebf6b8a2018-09-24 16:31:08 +01001477 const int y = get_global_id(1);
1478 const int z = get_global_id(2);
1479
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001480 // Compute offset contribution
1481 int4 offset_term_s32 = offset_contribution(
1482 x, y, z
Gian Marco05288a22017-11-21 10:57:50 +00001483#if defined(A_OFFSET)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001484 ,
1485 sum_col_ptr,
1486 sum_col_stride_x,
1487 sum_col_step_x,
1488 sum_col_stride_y,
1489 sum_col_step_y,
1490 sum_col_offset_first_element_in_bytes
Gian Marco05288a22017-11-21 10:57:50 +00001491#endif // defined(A_OFFSET)
Gian Marco05288a22017-11-21 10:57:50 +00001492#if defined(B_OFFSET)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001493 ,
1494 sum_row_ptr,
1495 sum_row_stride_x,
1496 sum_row_step_x,
1497 sum_row_stride_y,
1498 sum_row_step_y,
1499 sum_row_offset_first_element_in_bytes
Gian Marco05288a22017-11-21 10:57:50 +00001500#endif // defined(B_OFFSET)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001501#if defined(ADD_BIAS)
1502 ,
1503 biases_ptr,
1504 biases_stride_x,
1505 biases_step_x,
1506 biases_offset_first_element_in_bytes
1507#endif // defined(ADD_BIAS)
1508 );
Gian Marco05288a22017-11-21 10:57:50 +00001509
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001510 __global uchar *mm_result_addr = mm_result_ptr + mm_result_offset_first_element_in_bytes + x * sizeof(int) + y * mm_result_stride_y + z * mm_result_stride_z;
Gian Marco05288a22017-11-21 10:57:50 +00001511
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001512 int4 in_s32 = vload4(0, (__global int *)mm_result_addr);
Gian Marco05288a22017-11-21 10:57:50 +00001513
1514 // Add the offset terms to GEMM's result
1515 in_s32 += offset_term_s32;
1516
1517 // Store the result with the offset contribution
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001518 vstore4(in_s32, 0, (__global int *)mm_result_addr);
Gian Marco05288a22017-11-21 10:57:50 +00001519}
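/* Worked example (hypothetical reduction values, using the option values quoted in the
 * documentation above): with -DA_OFFSET=1, -DB_OFFSET=6 and k = 200 matrix A columns,
 * K_OFFSET = 1 * 6 * 200 = 1200. If sum_col[k] = 50 and sum_row[i] = 30, each accumulator
 * is corrected by 50 * 1 + 30 * 6 + 1200 = 1430 before any output stage. */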
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001520
Michele Di Giorgiob54ba282020-01-14 15:31:55 +00001521#if defined(RESULT_OFFSET) && defined(RESULT_MULTIPLIER) && defined(RESULT_SHIFT) && defined(OUTPUT_DATA_TYPE)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001522/* OpenCL kernel used to add the offset contribution after matrix multiplication and quantize down to uint8/int8.
1523 *
1524 * This kernel takes a final int32 accumulator value (the output of matrix multiplication), adds to it the offset contribution of matrix A and matrix B and quantizes to uint8/int8 through the output stage.
1525 *
1526 *
1527 * @attention The k_offset = a_offset * b_offset * k (where k is the number of matrix A columns) needs to be passed at compile time using -DK_OFFSET (i.e. -DK_OFFSET=1200)
1528 * @note In case the offset contribution due to a_offset is required, a_offset needs to be passed at compile time using -DA_OFFSET (i.e. -DA_OFFSET=1)
1529 * @note In case the offset contribution due to b_offset is required, b_offset needs to be passed at compile time using -DB_OFFSET (i.e. -DB_OFFSET=6)
1530 * @note In case sum_col has batches, -DSUM_COL_HAS_BATCHES must be passed at compile time. Usually if gemmlowp is used to accelerate convolution layer, sum_col will not have batches
1531 *
1532 * The result before the output stage is:
1533 *
1534 * mm_result[i][k] = mm_result[i][k] +
1535 * (sum_col[k] * A_OFFSET) +
1536 * (sum_row[i] * B_OFFSET) +
1537 * (K_OFFSET)
1538 *
Manuel Bottini959c26d2019-12-02 16:22:35 +00001539 * This result is quantized down to uint8/int8 using the output stage. The output stage computes the following operations:
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001540 *
1541 * -# Add offset terms to final result
1542 * -# Multiply each entry of result by result_mult_int
1543 * -# Add bias to final result (if -DADD_BIAS is passed at compile time)
1544 * -# Shift the int32 accumulator by result_shift
1545 * -# Clamp the value between the specified min and max bounds (if -DMIN_BOUND and/or -DMAX_BOUND are passed at compile time)
Manuel Bottini959c26d2019-12-02 16:22:35 +00001546 * -# Clamp the resulting int32 values:
1547 * - to the [0..255] range and cast to QASYMM8.
1548 * - to the [-128..127] range and cast to QASYMM8_SIGNED.
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001549 *
1550 * @attention The offset, scalar scale factor and number of bits to shift right of output tensor must be passed at compile time using -DRESULT_OFFSET, -DRESULT_MULTIPLIER and -DRESULT_SHIFT
1551 *
1552 * @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
Manuel Bottini959c26d2019-12-02 16:22:35 +00001553 * @note The output datatype should be passed at compile time using -DOUTPUT_DATA_TYPE
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001554 * @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
1555 * These values can be used to implement "rectified linear unit" activation functions
1556 *
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001557 * @param[in] mm_result_ptr Pointer to the source tensor. Supported data type: S32
1558 * @param[in] mm_result_stride_x Stride of the source tensor in X dimension (in bytes)
1559 * @param[in] mm_result_step_x mm_result_stride_x * number of elements along X processed per workitem(in bytes)
1560 * @param[in] mm_result_stride_y Stride of the source tensor in Y dimension (in bytes)
1561 * @param[in] mm_result_step_y mm_result_stride_y * number of elements along Y processed per workitem(in bytes)
1562 * @param[in] mm_result_stride_z Stride of the source tensor in Z dimension (in bytes)
1563 * @param[in] mm_result_step_z mm_result_stride_z * number of elements along Z processed per workitem(in bytes)
1564 * @param[in] mm_result_offset_first_element_in_bytes The offset of the first element in the source tensor
1565 * @param[in] sum_col_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1566 * @param[in] sum_col_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1567 * @param[in] sum_col_step_x (Optional) sum_col_stride_x * number of elements along X processed per workitem(in bytes)
1568 * @param[in] sum_col_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1569 * @param[in] sum_col_step_y (Optional) sum_col_stride_y * number of elements along Y processed per workitem(in bytes)
1570 * @param[in] sum_col_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
1571 * @param[in] sum_row_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1572 * @param[in] sum_row_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1573 * @param[in] sum_row_step_x (Optional) sum_row_stride_x * number of elements along X processed per workitem(in bytes)
1574 * @param[in] sum_row_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1575 * @param[in] sum_row_step_y (Optional) sum_row_stride_y * number of elements along Y processed per workitem(in bytes)
1576 * @param[in] sum_row_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
1577 * @param[in] biases_ptr (Optional) Pointer to the biases tensor. Supported data type: same as @p src_ptr
1578 * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
1579 * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
1580 * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
Manuel Bottini959c26d2019-12-02 16:22:35 +00001581 * @param[out] dst_ptr                            Pointer to the destination tensor. Supported data type: QASYMM8/QASYMM8_SIGNED
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001582 * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
1583 * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
1584 * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
1585 * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
1586 * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
1587 * @param[in] dst_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
1588 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
1589 * @param[in] result_multipliers_ptr (Optional) Pointer to the output multipliers vector for per-channel quantization. Supported data types: S32
1590 * @param[in] result_multipliers_stride_x (Optional) Stride of the output multipliers vector in X dimension (in bytes)
1591 * @param[in] result_multipliers_step_x (Optional) output_multipliers_stride_x * number of elements along X processed per workitem(in bytes)
1592 * @param[in] result_multipliers_offset_first_element_in_bytes (Optional) The offset of the first element in the output multipliers vector
1593 * @param[in] result_shifts_ptr (Optional) Pointer to the output shifts vector for per-channel quantization. Supported data types: S32
1594 * @param[in] result_shifts_stride_x (Optional) Stride of the output shifts vector in X dimension (in bytes)
1595 * @param[in] result_shifts_step_x (Optional) output_shifts_stride_x * number of elements along X processed per workitem(in bytes)
1596 * @param[in] result_shifts_offset_first_element_in_bytes (Optional) The offset of the first element in the output shifts vector
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001597 */
1598__kernel void gemmlowp_offset_contribution_quantize_down(TENSOR3D_DECLARATION(mm_result)
1599#if defined(A_OFFSET)
1600 ,
1601 IMAGE_DECLARATION(sum_col)
1602#endif // defined(A_OFFSET)
1603#if defined(B_OFFSET)
1604 ,
1605 IMAGE_DECLARATION(sum_row)
1606#endif // defined(B_OFFSET)
1607 ,
1608#if defined(ADD_BIAS)
1609 VECTOR_DECLARATION(biases),
1610#endif // defined(ADD_BIAS)
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001611 TENSOR3D_DECLARATION(dst)
1612#if defined(PER_CHANNEL_QUANTIZATION)
1613 ,
1614 VECTOR_DECLARATION(result_multipliers),
1615 VECTOR_DECLARATION(result_shifts)
1616#endif // defined(PER_CHANNEL_QUANTIZATION)
1617 )
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001618{
1619 const int x = get_global_id(0) * 4;
1620 const int y = get_global_id(1);
1621 const int z = get_global_id(2);
1622
1623 __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x + y * dst_stride_y + z * dst_stride_z;
1624
1625 // Compute offset contribution
1626 int4 offset_term_s32 = offset_contribution(
1627 x, y, z
1628#if defined(A_OFFSET)
1629 ,
1630 sum_col_ptr,
1631 sum_col_stride_x,
1632 sum_col_step_x,
1633 sum_col_stride_y,
1634 sum_col_step_y,
1635 sum_col_offset_first_element_in_bytes
1636#endif // defined(A_OFFSET)
1637#if defined(B_OFFSET)
1638 ,
1639 sum_row_ptr,
1640 sum_row_stride_x,
1641 sum_row_step_x,
1642 sum_row_stride_y,
1643 sum_row_step_y,
1644 sum_row_offset_first_element_in_bytes
1645#endif // defined(B_OFFSET)
1646#if defined(ADD_BIAS)
1647 ,
1648 biases_ptr,
1649 biases_stride_x,
1650 biases_step_x,
1651 biases_offset_first_element_in_bytes
1652#endif // defined(ADD_BIAS)
1653 );
1654
1655 __global uchar *mm_result_addr = mm_result_ptr + mm_result_offset_first_element_in_bytes + x * sizeof(int) + y * mm_result_stride_y + z * mm_result_stride_z;
1656
1657 int4 in_s32 = vload4(0, (__global int *)mm_result_addr);
1658
1659 // Add the offset terms to GEMM's result
1660 in_s32 += offset_term_s32;
1661
1662 // -------------- OUTPUT STAGE
1663
1664 // Add the offset terms to GEMM's result
1665 in_s32 += (int4)RESULT_OFFSET;
1666
1667 // Multiply by result_mult_int and shift
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001668#if defined(PER_CHANNEL_QUANTIZATION)
1669 __global uchar *result_multipliers_addr = result_multipliers_ptr + result_multipliers_offset_first_element_in_bytes + x * sizeof(int);
1670 __global uchar *result_shifts_addr = result_shifts_ptr + result_shifts_offset_first_element_in_bytes + x * sizeof(int);
1671 int4 result_multipliers_values = vload4(0, (__global int *)result_multipliers_addr);
1672 int4 result_shifts_values = vload4(0, (__global int *)result_shifts_addr);
1673
1674 in_s32 *= result_multipliers_values;
1675 in_s32 >>= result_shifts_values;
1676#else // defined(PER_CHANNEL_QUANTIZATION)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001677 in_s32 *= RESULT_MULTIPLIER;
1678
1679 in_s32 >>= RESULT_SHIFT;
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001680#endif // defined(PER_CHANNEL_QUANTIZATION)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001681
Manuel Bottini959c26d2019-12-02 16:22:35 +00001682 VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4)
1683 res = CONVERT_SAT(in_s32, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4));
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001684
1685#if defined(MIN_BOUND)
Manuel Bottini959c26d2019-12-02 16:22:35 +00001686 res = max(res, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4))MIN_BOUND);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001687#endif // defined(MIN_BOUND)
1688#if defined(MAX_BOUND)
Manuel Bottini959c26d2019-12-02 16:22:35 +00001689 res = min(res, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4))MAX_BOUND);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001690#endif // defined(MAX_BOUND)
1691
1692 // Store the result
Manuel Bottini959c26d2019-12-02 16:22:35 +00001693 vstore4(res, 0, (__global OUTPUT_DATA_TYPE *)dst_addr);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001694}
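/* Worked example of the output stage above (arbitrary illustrative values): with
 * -DRESULT_OFFSET=2, -DRESULT_MULTIPLIER=3 and -DRESULT_SHIFT=4, an accumulator of 1000
 * becomes ((1000 + 2) * 3) >> 4 = 187, which is then saturated to OUTPUT_DATA_TYPE and
 * clamped to [MIN_BOUND, MAX_BOUND] when those bounds are defined. */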
1695
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001696/* OpenCL kernel used to add the offset contribution after matrix multiplication and quantize down to uint8/int8.
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001697 *
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001698 * This kernel takes a final int32 accumulator value (the output of matrix multiplication), adds to it the offset contribution of matrix A and matrix B and quantizes to uint8/int8 through the output stage.
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001699 *
1700 *
1701 * @attention The k_offset = a_offset * b_offset * k (where k is the number of matrix A columns) needs to be passed at compile time using -DK_OFFSET (i.e. -DK_OFFSET=1200)
1702 * @note In case the offset contribution due to a_offset is required, a_offset needs to be passed at compile time using -DA_OFFSET (i.e. -DA_OFFSET=1)
1703 * @note In case the offset contribution due to b_offset is required, b_offset needs to be passed at compile time using -DB_OFFSET (i.e. -DB_OFFSET=6)
1704 * @note In case sum_col has batches, -DSUM_COL_HAS_BATCHES must be passed at compile time. Usually if gemmlowp is used to accelerate convolution layer, sum_col will not have batches
1705 *
1706 * The result before the output stage is:
1707 *
1708 * mm_result[i][k] = mm_result[i][k] +
1709 * (sum_col[k] * A_OFFSET) +
1710 * (sum_row[i] * B_OFFSET) +
1711 * (K_OFFSET)
1712 *
Manuel Bottini959c26d2019-12-02 16:22:35 +00001713 * This result is quantized down to uint8/int8 using the output stage. The output stage computes the following operations:
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001714 *
1715 * -# Compute fixed point multiplication between each entry of input by result_fixedpoint_multiplier
1716 * -# Add bias to final result if bias tensor is not a nullptr
1717 * -# Round to nearest division by a power-of-two using result_shift
1718 * -# Add offset to each result
1719 * -# Clamp the value between the specified min and max bounds
Manuel Bottini959c26d2019-12-02 16:22:35 +00001720 * -# Clamp the resulting int32 values:
1721 * - to the [0..255] range and cast to QASYMM8.
1722 * - to the [-128..127] range and cast to QASYMM8_SIGNED.
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001723 *
1724 * @attention The offset, scalar scale factor and number of bits to shift right of output tensor must be passed at compile time using -DRESULT_OFFSET, -DRESULT_MULTIPLIER and -DRESULT_SHIFT
1725 *
1726 * @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
Manuel Bottini959c26d2019-12-02 16:22:35 +00001727 * @note The output datatype should be passed at compile time using -DOUTPUT_DATA_TYPE
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001728 * @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
1729 * These values can be used to implement "rectified linear unit" activation functions
1730 *
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001731 * @param[in] mm_result_ptr Pointer to the source tensor. Supported data type: S32
1732 * @param[in] mm_result_stride_x Stride of the source tensor in X dimension (in bytes)
1733 * @param[in] mm_result_step_x mm_result_stride_x * number of elements along X processed per workitem(in bytes)
1734 * @param[in] mm_result_stride_y Stride of the source tensor in Y dimension (in bytes)
1735 * @param[in] mm_result_step_y mm_result_stride_y * number of elements along Y processed per workitem(in bytes)
1736 * @param[in] mm_result_stride_z Stride of the source tensor in Z dimension (in bytes)
1737 * @param[in] mm_result_step_z mm_result_stride_z * number of elements along Z processed per workitem(in bytes)
1738 * @param[in] mm_result_offset_first_element_in_bytes The offset of the first element in the source tensor
1739 * @param[in] sum_col_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1740 * @param[in] sum_col_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1741 * @param[in] sum_col_step_x (Optional) sum_col_stride_x * number of elements along X processed per workitem(in bytes)
1742 * @param[in] sum_col_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1743 * @param[in] sum_col_step_y (Optional) sum_col_stride_y * number of elements along Y processed per workitem(in bytes)
1744 * @param[in] sum_col_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
1745 * @param[in] sum_row_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1746 * @param[in] sum_row_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1747 * @param[in] sum_row_step_x (Optional) sum_row_stride_x * number of elements along X processed per workitem(in bytes)
1748 * @param[in] sum_row_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1749 * @param[in] sum_row_step_y (Optional) sum_row_stride_y * number of elements along Y processed per workitem(in bytes)
1750 * @param[in] sum_row_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
1751 * @param[in] biases_ptr (Optional) Pointer to the biases tensor. Supported data type: same as @p src_ptr
1752 * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
1753 * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
1754 * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
1755 * @param[out] dst_ptr                                              Pointer to the destination tensor. Supported data type: QASYMM8/QASYMM8_SIGNED
1756 * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
1757 * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
1758 * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
1759 * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
1760 * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
1761 * @param[in] dst_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
1762 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
1763 * @param[in] result_multipliers_ptr (Optional) Pointer to the output multipliers vector for per-channel quantization. Supported data types: S32
1764 * @param[in] result_multipliers_stride_x (Optional) Stride of the output multipliers vector in X dimension (in bytes)
1765 * @param[in] result_multipliers_step_x (Optional) output_multipliers_stride_x * number of elements along X processed per workitem(in bytes)
1766 * @param[in] result_multipliers_offset_first_element_in_bytes (Optional) The offset of the first element in the output multipliers vector
1767 * @param[in] result_shifts_ptr (Optional) Pointer to the output shifts vector for per-channel quantization. Supported data types: S32
1768 * @param[in] result_shifts_stride_x (Optional) Stride of the output shifts vector in X dimension (in bytes)
1769 * @param[in] result_shifts_step_x (Optional) output_shifts_stride_x * number of elements along X processed per workitem(in bytes)
1770 * @param[in] result_shifts_offset_first_element_in_bytes (Optional) The offset of the first element in the output shifts vector
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001771 */
1772__kernel void gemmlowp_offset_contribution_quantize_down_fixedpoint(TENSOR3D_DECLARATION(mm_result)
1773#if defined(A_OFFSET)
1774 ,
1775 IMAGE_DECLARATION(sum_col)
1776#endif // defined(A_OFFSET)
1777#if defined(B_OFFSET)
1778 ,
1779 IMAGE_DECLARATION(sum_row)
1780#endif // defined(B_OFFSET)
1781 ,
1782#if defined(ADD_BIAS)
1783 VECTOR_DECLARATION(biases),
1784#endif // defined(ADD_BIAS)
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001785 TENSOR3D_DECLARATION(dst)
1786#if defined(PER_CHANNEL_QUANTIZATION)
1787 ,
1788 VECTOR_DECLARATION(result_multipliers),
1789 VECTOR_DECLARATION(result_shifts)
1790#endif // defined(PER_CHANNEL_QUANTIZATION)
1791 )
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001792{
1793 const int x = get_global_id(0) * 4;
1794 const int y = get_global_id(1);
1795 const int z = get_global_id(2);
1796
1797 // Compute offset contribution
1798 int4 offset_term_s32 = offset_contribution(
1799 x, y, z
1800#if defined(A_OFFSET)
1801 ,
1802 sum_col_ptr,
1803 sum_col_stride_x,
1804 sum_col_step_x,
1805 sum_col_stride_y,
1806 sum_col_step_y,
1807 sum_col_offset_first_element_in_bytes
1808#endif // defined(A_OFFSET)
1809#if defined(B_OFFSET)
1810 ,
1811 sum_row_ptr,
1812 sum_row_stride_x,
1813 sum_row_step_x,
1814 sum_row_stride_y,
1815 sum_row_step_y,
1816 sum_row_offset_first_element_in_bytes
1817#endif // defined(B_OFFSET)
1818#if defined(ADD_BIAS)
1819 ,
1820 biases_ptr,
1821 biases_stride_x,
1822 biases_step_x,
1823 biases_offset_first_element_in_bytes
1824#endif // defined(ADD_BIAS)
1825 );
1826
1827 __global uchar *mm_result_addr = mm_result_ptr + mm_result_offset_first_element_in_bytes + x * sizeof(int) + y * mm_result_stride_y + z * mm_result_stride_z;
1828
1829 __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x + y * dst_stride_y + z * dst_stride_z;
1830
1831 int4 in_s32 = vload4(0, (__global int *)mm_result_addr);
1832
1833 // Add the offset terms to GEMM's result
1834 in_s32 += offset_term_s32;
1835
1836 // -------------- OUTPUT STAGE
1837
1838 // Multiply by result_mult_int and shift
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001839#if defined(PER_CHANNEL_QUANTIZATION)
1840 __global uchar *result_multipliers_addr = result_multipliers_ptr + result_multipliers_offset_first_element_in_bytes + x * sizeof(int);
1841 __global uchar *result_shifts_addr = result_shifts_ptr + result_shifts_offset_first_element_in_bytes + x * sizeof(int);
1842 int4 result_multipliers_values = vload4(0, (__global int *)result_multipliers_addr);
1843 int4 result_shifts_values = vload4(0, (__global int *)result_shifts_addr);
1844
Michele Di Giorgio14cbfb22019-10-23 10:53:10 +01001845 int4 in_s32_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(in_s32, result_multipliers_values, result_shifts_values, 4);
1846 int4 in_s32_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(in_s32, result_multipliers_values, result_shifts_values, 4);
1847 in_s32 = select(in_s32_shift_lt0, in_s32_shift_gt0, result_shifts_values >= 0);
1848#else // defined(PER_CHANNEL_QUANTIZATION)
1849
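    // A negative RESULT_SHIFT encodes a fixed-point multiplier greater than one (left shift),
    // a non-negative RESULT_SHIFT encodes a multiplier less than one (rounding right shift)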
1850#if RESULT_SHIFT < 0
1851 in_s32 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(in_s32, RESULT_MULTIPLIER, RESULT_SHIFT, 4);
1852#else // RESULT_SHIFT >= 0
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001853 in_s32 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(in_s32, RESULT_MULTIPLIER, RESULT_SHIFT, 4);
Michele Di Giorgio14cbfb22019-10-23 10:53:10 +01001854#endif // RESULT_SHIFT < 0
1855
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001856#endif // defined(PER_CHANNEL_QUANTIZATION)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001857
1858 // Add the offset terms to GEMM's result
1859 in_s32 += (int4)RESULT_OFFSET;
1860
Manuel Bottini959c26d2019-12-02 16:22:35 +00001861 VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4)
1862 res = CONVERT_SAT(in_s32, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4));
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001863
1864#if defined(MIN_BOUND)
Manuel Bottini959c26d2019-12-02 16:22:35 +00001865 res = max(res, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4))MIN_BOUND);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001866#endif // defined(MIN_BOUND)
1867#if defined(MAX_BOUND)
Manuel Bottini959c26d2019-12-02 16:22:35 +00001868 res = min(res, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4))MAX_BOUND);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001869#endif // defined(MAX_BOUND)
1870
1871 // Store the result
Manuel Bottini959c26d2019-12-02 16:22:35 +00001872 vstore4(res, 0, (__global OUTPUT_DATA_TYPE *)dst_addr);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001873}
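/* Note on the two output stages above: gemmlowp_offset_contribution_quantize_down adds
 * RESULT_OFFSET before the integer multiply and shift, while the fixed-point variant applies
 * the fixed-point multiplier and shift first and adds RESULT_OFFSET afterwards, as listed in
 * the respective kernel documentation. */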
Michele Di Giorgiob54ba282020-01-14 15:31:55 +00001874#endif // defined(RESULT_OFFSET) && defined(RESULT_MULTIPLIER) && defined(RESULT_SHIFT) && defined(OUTPUT_DATA_TYPE)
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001875
Gian Marco05288a22017-11-21 10:57:50 +00001876#endif // defined(K_OFFSET)
1877
1878#if defined(RESULT_OFFSET) && defined(RESULT_MULT_INT) && defined(RESULT_SHIFT)
Luca Foschiani689c9682020-02-26 14:30:14 +00001879/** This OpenCL kernel is used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED
Gian Marco05288a22017-11-21 10:57:50 +00001880 *
Luca Foschiani689c9682020-02-26 14:30:14 +00001881 * This kernel takes a final int32 accumulator value and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value.
Gian Marco05288a22017-11-21 10:57:50 +00001882 * The following computations will be performed by the kernel:
1883 *
1884 * -# Add offset terms to final result
1885 * -# Multiply each entry of result by result_mult_int
1886 * -# Add bias to final result (if -DADD_BIAS is passed at compile time)
1887 * -# Shift the int32 accumulator by result_shift
1888 * -# Clamp the value between the specified min and max bounds (if -DMIN_BOUND and/or -DMAX_BOUND are passed at compile time)
Luca Foschiani689c9682020-02-26 14:30:14 +00001889 * -# Clamp the resulting int32 values:
1890 *    - to the [0..255] range and cast to QASYMM8.
1891 *    - to the [-128..127] range and cast to QASYMM8_SIGNED.
Gian Marco05288a22017-11-21 10:57:50 +00001892 *
1893 * @attention The offset, scalar scale factor and number of bits to shift right of output tensor must be passed at compile time using -DRESULT_OFFSET, -DRESULT_MULT_INT and -DRESULT_SHIFT
1894 *
1895 * @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
Luca Foschiani689c9682020-02-26 14:30:14 +00001896 * @note The output datatype should be passed at compile time using -DOUTPUT_DATA_TYPE
Gian Marco05288a22017-11-21 10:57:50 +00001897 * @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
1898 * These values can be used to implement "rectified linear unit" activation functions
1899 *
1900 * @param[in] src_ptr Pointer to the source tensor. Supported data type: S32
1901 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
1902 * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
1903 * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
1904 * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
1905 * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
1906 * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
1907 * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001908 * @param[in] biases_ptr (Optional) Pointer to the biases tensor. Supported data type: same as @p src_ptr
1909 * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
1910 * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
1911 * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
Luca Foschiani689c9682020-02-26 14:30:14 +00001912 * @param[out] dst_ptr Pointer to the destination tensor Supported data type: QASYMM8/QASYMM8_SIGNED
Gian Marco05288a22017-11-21 10:57:50 +00001913 * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
1914 * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
1915 * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
1916 * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
1917 * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
1918 * @param[in] dst_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
1919 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
1920 */
__kernel void gemmlowp_output_stage_quantize_down(TENSOR3D_DECLARATION(src),
#if defined(ADD_BIAS)
                                                  VECTOR_DECLARATION(biases),
#endif // defined(ADD_BIAS)
                                                  TENSOR3D_DECLARATION(dst))
{
    // Compute source and destination addresses
    int x = get_global_id(0) * 4;
    int y = get_global_id(1);
    int z = get_global_id(2);

    __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(int) + y * src_stride_y + z * src_stride_z;

    __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x + y * dst_stride_y + z * dst_stride_z;

    int4 input_values = vload4(0, (__global int *)src_addr);

#if defined(ADD_BIAS)
    // Add bias
    __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(int);

    int4 biases_values = vload4(0, (__global int *)bias_addr);
    input_values += (int4)biases_values;
#endif // defined(ADD_BIAS)

    // Add the offset terms to GEMM's result
    input_values += (int4)RESULT_OFFSET;

    // Multiply by result_mult_int and shift
    input_values *= RESULT_MULT_INT;

#if RESULT_SHIFT < 0
    input_values >>= -RESULT_SHIFT;
#else // RESULT_SHIFT >= 0
    input_values >>= RESULT_SHIFT;
#endif // RESULT_SHIFT < 0

    VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4)
    res = CONVERT_SAT(input_values, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4));

#if defined(MIN_BOUND)
    res = max(res, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4))MIN_BOUND);
#endif // defined(MIN_BOUND)
#if defined(MAX_BOUND)
    res = min(res, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4))MAX_BOUND);
#endif // defined(MAX_BOUND)

    // Store the result
    vstore4(res, 0, (__global OUTPUT_DATA_TYPE *)dst_addr);
}
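
/* Illustrative sketch only (not called by any kernel in this file): a scalar reference of the
 * quantize-down arithmetic performed by gemmlowp_output_stage_quantize_down above, written out for
 * a single accumulator value. It reuses the same compile-time options (RESULT_OFFSET,
 * RESULT_MULT_INT, RESULT_SHIFT, optional MIN_BOUND/MAX_BOUND, OUTPUT_DATA_TYPE); the function
 * name and its bias parameter are hypothetical. The kernel applies exactly these steps to an int4
 * vector of accumulators.
 */
inline OUTPUT_DATA_TYPE example_quantize_down_scalar(int acc, int bias)
{
    acc += bias;          // Optional bias contribution (pass 0 when -DADD_BIAS is not used)
    acc += RESULT_OFFSET; // Add the offset term
    acc *= RESULT_MULT_INT;
#if RESULT_SHIFT < 0
    acc >>= -RESULT_SHIFT;
#else // RESULT_SHIFT >= 0
    acc >>= RESULT_SHIFT;
#endif // RESULT_SHIFT < 0

    // Saturating cast to the output data type, then apply the optional bounds
    OUTPUT_DATA_TYPE out = CONVERT_SAT(acc, OUTPUT_DATA_TYPE);
#if defined(MIN_BOUND)
    out = max(out, (OUTPUT_DATA_TYPE)MIN_BOUND);
#endif // defined(MIN_BOUND)
#if defined(MAX_BOUND)
    out = min(out, (OUTPUT_DATA_TYPE)MAX_BOUND);
#endif // defined(MAX_BOUND)
    return out;
}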
#endif // defined(RESULT_OFFSET) && defined(RESULT_MULT_INT) && defined(RESULT_SHIFT)

#if defined(RESULT_OFFSET_AFTER_SHIFT) && defined(RESULT_FIXEDPOINT_MULTIPLIER) && defined(RESULT_SHIFT)
/** This OpenCL kernel is used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED
 *
 * This kernel takes a final int32 accumulator value (the output of matrix multiplication), and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value.
 * The following computations will be performed by the kernel:
 *
 *  -# Add bias to final result if bias tensor is not a nullptr
 *  -# Compute fixed point multiplication between each entry of input by result_fixedpoint_multiplier
 *  -# Round to nearest division by a power-of-two using result_shift
 *  -# Add offset to each result
 *  -# Clamp the value between the specified min and max bounds
 *  -# Clamp the resulting int32 values:
 *     - to the [0..255] range and cast to QASYMM8.
 *     - to the [-128..127] range and cast to QASYMM8_SIGNED.
 *
 * @attention The offset, scalar scale factor and number of bits to shift right of output tensor must be passed at compile time using -DRESULT_OFFSET_AFTER_SHIFT, -DRESULT_FIXEDPOINT_MULTIPLIER and -DRESULT_SHIFT
 *
 * @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
 * @note The output datatype should be passed at compile time using -DOUTPUT_DATA_TYPE
 * @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
 *       These values can be used to implement "rectified linear unit" activation functions
 * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER, e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder of the input's first dimension divided by the processing vector size (4 for this kernel)
 * @note An illustrative (unused) helper showing these steps factored into a reusable function is provided after this kernel, for documentation purposes only
 *
 * @param[in]  src_ptr                              Pointer to the source tensor. Supported data type: S32
 * @param[in]  src_stride_x                         Stride of the source tensor in X dimension (in bytes)
 * @param[in]  src_step_x                           src_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  src_stride_y                         Stride of the source tensor in Y dimension (in bytes)
 * @param[in]  src_step_y                           src_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in]  src_stride_z                         Stride of the source tensor in Z dimension (in bytes)
 * @param[in]  src_step_z                           src_stride_z * number of elements along Z processed per workitem(in bytes)
 * @param[in]  src_offset_first_element_in_bytes    The offset of the first element in the source tensor
 * @param[in]  biases_ptr                           (Optional) Pointer to the biases tensor. Supported data type: same as @p src_ptr
 * @param[in]  biases_stride_x                      (Optional) Stride of the biases tensor in X dimension (in bytes)
 * @param[in]  biases_step_x                        (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
 * @param[out] dst_ptr                              Pointer to the destination tensor. Supported data type: QASYMM8/QASYMM8_SIGNED
 * @param[in]  dst_stride_x                         Stride of the destination tensor in X dimension (in bytes)
 * @param[in]  dst_step_x                           dst_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  dst_stride_y                         Stride of the destination tensor in Y dimension (in bytes)
 * @param[in]  dst_step_y                           dst_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in]  dst_stride_z                         Stride of the destination tensor in Z dimension (in bytes)
 * @param[in]  dst_step_z                           dst_stride_z * number of elements along Z processed per workitem(in bytes)
 * @param[in]  dst_offset_first_element_in_bytes    The offset of the first element in the destination tensor
 */
__kernel void gemmlowp_output_stage_quantize_down_fixedpoint(TENSOR3D_DECLARATION(src),
#if defined(ADD_BIAS)
                                                             VECTOR_DECLARATION(biases),
#endif // defined(ADD_BIAS)
                                                             TENSOR3D_DECLARATION(dst))
{
    // Compute source and destination addresses
    // Leftover handling: when the first dimension is not a multiple of 4, all work-items after the
    // first are shifted back by (4 - VEC_SIZE_LEFTOVER) % 4 so the tensor is fully covered, while
    // the first work-item stores only VEC_SIZE_LEFTOVER elements (see STORE_VECTOR_SELECT below)
    int x = max((int)(get_global_id(0) * 4 - (4 - VEC_SIZE_LEFTOVER) % 4), 0);
    int y = get_global_id(1);
    int z = get_global_id(2);

    __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(int) + y * src_stride_y + z * src_stride_z;

    __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x + y * dst_stride_y + z * dst_stride_z;

    int4 input_values = vload4(0, (__global int *)src_addr);

#if defined(ADD_BIAS)
    // Add bias
    __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(int);

    int4 biases_values = vload4(0, (__global int *)bias_addr);
    input_values += (int4)biases_values;
#endif // defined(ADD_BIAS)

    // Multiply by result_fixedpoint_multiplier and shift
#if RESULT_SHIFT < 0
    input_values = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(input_values, RESULT_FIXEDPOINT_MULTIPLIER, RESULT_SHIFT, 4);
#else // RESULT_SHIFT >= 0
    input_values = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(input_values, RESULT_FIXEDPOINT_MULTIPLIER, RESULT_SHIFT, 4);
#endif // RESULT_SHIFT < 0

    // Add the offset terms to GEMM's result
    input_values += (int4)RESULT_OFFSET_AFTER_SHIFT;

    VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4)
    res0 = CONVERT_SAT(input_values, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4));

#if defined(MIN_BOUND)
    res0 = max(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4))MIN_BOUND);
#endif // defined(MIN_BOUND)
#if defined(MAX_BOUND)
    res0 = min(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4))MAX_BOUND);
#endif // defined(MAX_BOUND)

    // Store the result
    STORE_VECTOR_SELECT(res, OUTPUT_DATA_TYPE, dst_addr, 4, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}
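
/* Illustrative sketch only (not called by any kernel in this file): the quantize-down tail of
 * gemmlowp_output_stage_quantize_down_fixedpoint factored into a reusable int4 helper, shown to
 * document how the fixed-point multiplier/shift, the output offset and the optional bounds combine.
 * The function name is hypothetical; it reuses the kernel's compile-time options and the
 * ASYMM_MULT_BY_QUANT_MULTIPLIER_* helpers from helpers_asymm.h with a vector size of 4.
 */
inline VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4) example_quantize_down_fixedpoint_vec4(int4 acc)
{
    // Fixed-point multiplication and rounding shift
#if RESULT_SHIFT < 0
    acc = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc, RESULT_FIXEDPOINT_MULTIPLIER, RESULT_SHIFT, 4);
#else // RESULT_SHIFT >= 0
    acc = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(acc, RESULT_FIXEDPOINT_MULTIPLIER, RESULT_SHIFT, 4);
#endif // RESULT_SHIFT < 0

    // Add the output offset, then saturate and clamp to the optional bounds
    acc += (int4)RESULT_OFFSET_AFTER_SHIFT;

    VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4)
    out = CONVERT_SAT(acc, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4));
#if defined(MIN_BOUND)
    out = max(out, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4))MIN_BOUND);
#endif // defined(MIN_BOUND)
#if defined(MAX_BOUND)
    out = min(out, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4))MAX_BOUND);
#endif // defined(MAX_BOUND)
    return out;
}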
#endif // defined(RESULT_OFFSET_AFTER_SHIFT) && defined(RESULT_FIXEDPOINT_MULTIPLIER) && defined(RESULT_SHIFT)

#if defined(RESULT_FIXEDPOINT_MULTIPLIER) && defined(RESULT_SHIFT)

/** This OpenCL kernel is used to quantize down the int32 accumulator values of GEMMLowp to QSYMM16
 *
 * This kernel takes a final int32 accumulator value (the output of matrix multiplication), and processes it to obtain the final QSYMM16 value.
 * The following computations will be performed by the kernel:
 *
 *  -# Add bias to final result if bias tensor is not a nullptr
 *  -# Compute fixed point multiplication between each entry of input by result_fixedpoint_multiplier
 *  -# Round to nearest division by a power-of-two using result_shift
 *  -# Clamp the value between the specified min and max bounds
 *  -# Clamp the resulting int32 values to the [-32768..32767] range and cast to QSYMM16.
 *
 * @attention The scalar scale factor and number of bits to shift right of output tensor must be passed at compile time using -DRESULT_FIXEDPOINT_MULTIPLIER and -DRESULT_SHIFT
 *
 * @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
 * @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
 *       These values can be used to implement "rectified linear unit" activation functions
 * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER, e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder of the input's first dimension divided by the processing vector size (4 for this kernel)
 * @note An illustrative (unused) scalar sketch of these steps is provided after this kernel, for documentation purposes only
 *
 * @param[in]  src_ptr                              Pointer to the source tensor. Supported data type: S32
 * @param[in]  src_stride_x                         Stride of the source tensor in X dimension (in bytes)
 * @param[in]  src_step_x                           src_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  src_stride_y                         Stride of the source tensor in Y dimension (in bytes)
 * @param[in]  src_step_y                           src_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in]  src_stride_z                         Stride of the source tensor in Z dimension (in bytes)
 * @param[in]  src_step_z                           src_stride_z * number of elements along Z processed per workitem(in bytes)
 * @param[in]  src_offset_first_element_in_bytes    The offset of the first element in the source tensor
 * @param[in]  biases_ptr                           (Optional) Pointer to the biases tensor. Supported data type: same as @p src_ptr
 * @param[in]  biases_stride_x                      (Optional) Stride of the biases tensor in X dimension (in bytes)
 * @param[in]  biases_step_x                        (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
 * @param[out] dst_ptr                              Pointer to the destination tensor. Supported data type: QSYMM16
 * @param[in]  dst_stride_x                         Stride of the destination tensor in X dimension (in bytes)
 * @param[in]  dst_step_x                           dst_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  dst_stride_y                         Stride of the destination tensor in Y dimension (in bytes)
 * @param[in]  dst_step_y                           dst_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in]  dst_stride_z                         Stride of the destination tensor in Z dimension (in bytes)
 * @param[in]  dst_step_z                           dst_stride_z * number of elements along Z processed per workitem(in bytes)
 * @param[in]  dst_offset_first_element_in_bytes    The offset of the first element in the destination tensor
 */
__kernel void gemmlowp_output_stage_quantize_down_fixedpoint_qsymm16(TENSOR3D_DECLARATION(src),
#if defined(ADD_BIAS)
                                                                     VECTOR_DECLARATION(biases),
#endif // defined(ADD_BIAS)
                                                                     TENSOR3D_DECLARATION(dst))
{
    // Compute source and destination addresses
    // Leftover handling: when the first dimension is not a multiple of 4, all work-items after the
    // first are shifted back by (4 - VEC_SIZE_LEFTOVER) % 4 so the tensor is fully covered, while
    // the first work-item stores only VEC_SIZE_LEFTOVER elements (see STORE_VECTOR_SELECT below)
    int x = max((int)(get_global_id(0) * 4 - (4 - VEC_SIZE_LEFTOVER) % 4), 0);
    int y = get_global_id(1);
    int z = get_global_id(2);

    __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(int) + y * src_stride_y + z * src_stride_z;

    __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * sizeof(short) + y * dst_stride_y + z * dst_stride_z;

    int4 input_values = vload4(0, (__global int *)src_addr);

#if defined(ADD_BIAS)
    // Add bias
    __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(int);

    int4 biases_values = vload4(0, (__global int *)bias_addr);
    input_values += (int4)biases_values;
#endif // defined(ADD_BIAS)

    // Multiply by result_fixedpoint_multiplier and shift
#if RESULT_SHIFT < 0
    input_values = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(input_values, RESULT_FIXEDPOINT_MULTIPLIER, RESULT_SHIFT, 4);
#else // RESULT_SHIFT >= 0
    input_values = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(input_values, RESULT_FIXEDPOINT_MULTIPLIER, RESULT_SHIFT, 4);
#endif // RESULT_SHIFT < 0

    short4 res0 = convert_short4_sat(input_values);

#if defined(MIN_BOUND)
    res0 = max(res0, (short4)MIN_BOUND);
#endif // defined(MIN_BOUND)
#if defined(MAX_BOUND)
    res0 = min(res0, (short4)MAX_BOUND);
#endif // defined(MAX_BOUND)

    // Store the result
    STORE_VECTOR_SELECT(res, short, dst_addr, 4, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}
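
/* Illustrative sketch only (not called by any kernel in this file): a scalar, floating-point
 * reference of the symmetric 16-bit quantize-down performed above. It assumes the fixed-point
 * multiplier/shift pair approximates a multiplication by the real scale
 * RESULT_FIXEDPOINT_MULTIPLIER * 2^(-RESULT_SHIFT) / 2^31; the function name, bias and
 * result_scale parameters are hypothetical and exist purely for documentation.
 */
inline short example_quantize_down_qsymm16_reference(int acc, int bias, float result_scale)
{
    acc += bias; // Optional bias contribution (pass 0 when -DADD_BIAS is not used)

    // Scale, round to the nearest integer and saturate to 16 bits (no offset: QSYMM16 is symmetric)
    short out = convert_short_sat(round((float)acc * result_scale));
#if defined(MIN_BOUND)
    out = max(out, (short)MIN_BOUND);
#endif // defined(MIN_BOUND)
#if defined(MAX_BOUND)
    out = min(out, (short)MAX_BOUND);
#endif // defined(MAX_BOUND)
    return out;
}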
#endif // defined(RESULT_FIXEDPOINT_MULTIPLIER) && defined(RESULT_SHIFT)

#if defined(REAL_MULTIPLIER) && defined(OUTPUT_OFFSET)
/** This OpenCL kernel is used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED
 *
 * This kernel takes a final int32 accumulator value (the output of matrix multiplication), and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value.
 * The following computations will be performed by the kernel:
 *
 *  -# Add bias to final result if bias tensor is not a nullptr
 *  -# Requantize: convert the int32 accumulator to float and multiply it by the real scale factor (REAL_MULTIPLIER)
 *  -# Add the output offset (OUTPUT_OFFSET) and round to the nearest integer
 *  -# Clamp the value between the specified min and max bounds
 *  -# Clamp the resulting int32 values:
 *     - to the [0..255] range and cast to QASYMM8.
 *     - to the [-128..127] range and cast to QASYMM8_SIGNED.
 *
 * @attention The output offset and the scalar scale factor must be passed at compile time using -DOUTPUT_OFFSET and -DREAL_MULTIPLIER
 *
 * @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
 * @note The output datatype should be passed at compile time using -DOUTPUT_DATA_TYPE
 * @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
 *       These values can be used to implement "rectified linear unit" activation functions
 * @note An illustrative (unused) scalar sketch of these steps is provided after this kernel, for documentation purposes only
 *
 * @param[in]  src_ptr                              Pointer to the source tensor. Supported data type: S32
 * @param[in]  src_stride_x                         Stride of the source tensor in X dimension (in bytes)
 * @param[in]  src_step_x                           src_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  src_stride_y                         Stride of the source tensor in Y dimension (in bytes)
 * @param[in]  src_step_y                           src_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in]  src_stride_z                         Stride of the source tensor in Z dimension (in bytes)
 * @param[in]  src_step_z                           src_stride_z * number of elements along Z processed per workitem(in bytes)
 * @param[in]  src_offset_first_element_in_bytes    The offset of the first element in the source tensor
 * @param[in]  biases_ptr                           (Optional) Pointer to the biases tensor. Supported data type: same as @p src_ptr
 * @param[in]  biases_stride_x                      (Optional) Stride of the biases tensor in X dimension (in bytes)
 * @param[in]  biases_step_x                        (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
 * @param[out] dst_ptr                              Pointer to the destination tensor. Supported data type: QASYMM8/QASYMM8_SIGNED
 * @param[in]  dst_stride_x                         Stride of the destination tensor in X dimension (in bytes)
 * @param[in]  dst_step_x                           dst_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  dst_stride_y                         Stride of the destination tensor in Y dimension (in bytes)
 * @param[in]  dst_step_y                           dst_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in]  dst_stride_z                         Stride of the destination tensor in Z dimension (in bytes)
 * @param[in]  dst_step_z                           dst_stride_z * number of elements along Z processed per workitem(in bytes)
 * @param[in]  dst_stride_w                         Stride of the destination tensor in W dimension (in bytes)
 * @param[in]  dst_step_w                           dst_stride_w * number of elements along W processed per workitem(in bytes)
 * @param[in]  dst_offset_first_element_in_bytes    The offset of the first element in the destination tensor
 */
__kernel void gemmlowp_output_stage_quantize_down_float(TENSOR3D_DECLARATION(src),
#if defined(ADD_BIAS)
                                                        VECTOR_DECLARATION(biases),
#endif // defined(ADD_BIAS)
#if defined(DST_HEIGHT)
                                                        TENSOR4D_DECLARATION(dst))
#else // defined(DST_HEIGHT)
                                                        TENSOR3D_DECLARATION(dst))
#endif // defined(DST_HEIGHT)
{
    // Compute source and destination addresses
    int x = get_global_id(0) * 4;
    int y = get_global_id(1);
    int z = get_global_id(2);

    __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(int) + y * src_stride_y + z * src_stride_z;

    __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x + y * dst_stride_y + z * dst_stride_z;

    int4 input_values = vload4(0, (__global int *)src_addr);

#if defined(ADD_BIAS)
    // Add bias
    __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(int);

    int4 biases_values = vload4(0, (__global int *)bias_addr);
    input_values += (int4)biases_values;
#endif // defined(ADD_BIAS)

    // Convert to float
    float4 input_values_f = convert_float4(input_values);
    input_values_f        = round(input_values_f * (float)REAL_MULTIPLIER + (float)OUTPUT_OFFSET);

    VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4)
    res = CONVERT_SAT(input_values_f, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4));

#if defined(MIN_BOUND)
    res = max(res, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4))MIN_BOUND);
#endif // defined(MIN_BOUND)
#if defined(MAX_BOUND)
    res = min(res, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4))MAX_BOUND);
#endif // defined(MAX_BOUND)

    // Store the result
    vstore4(res, 0, (__global OUTPUT_DATA_TYPE *)dst_addr);
}
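
/* Illustrative sketch only (not called by any kernel in this file): a scalar reference of the
 * floating-point requantization performed by gemmlowp_output_stage_quantize_down_float above,
 * reusing the same compile-time options (REAL_MULTIPLIER, OUTPUT_OFFSET, optional
 * MIN_BOUND/MAX_BOUND, OUTPUT_DATA_TYPE); the function name and its bias parameter are hypothetical.
 */
inline OUTPUT_DATA_TYPE example_quantize_down_float_scalar(int acc, int bias)
{
    acc += bias; // Optional bias contribution (pass 0 when -DADD_BIAS is not used)

    // Scale by the real multiplier, add the output offset and round to the nearest integer
    float acc_f = round((float)acc * (float)REAL_MULTIPLIER + (float)OUTPUT_OFFSET);

    // Saturating cast to the output data type, then apply the optional bounds
    OUTPUT_DATA_TYPE out = CONVERT_SAT(acc_f, OUTPUT_DATA_TYPE);
#if defined(MIN_BOUND)
    out = max(out, (OUTPUT_DATA_TYPE)MIN_BOUND);
#endif // defined(MIN_BOUND)
#if defined(MAX_BOUND)
    out = min(out, (OUTPUT_DATA_TYPE)MAX_BOUND);
#endif // defined(MAX_BOUND)
    return out;
}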
#endif // defined(REAL_MULTIPLIER) && defined(OUTPUT_OFFSET)