/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "gemm_helpers.h"
#include "helpers_asymm.h"
#include "repeat.h"

#if defined(DATA_TYPE) && defined(ACC_DATA_TYPE)

#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#define ARM_DOT(x, y, val) val = arm_dot_acc((x), (y), (val));
#else // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#define ARM_DOT(x, y, val) val += arm_dot((x), (y));
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)

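// Usage sketch for ARM_DOT, assuming DATA_TYPE=uchar and ACC_DATA_TYPE=uint: both variants
// accumulate the 4-element dot product of the first two arguments into the third one.
//
//     uchar4 x   = (uchar4)(1, 2, 3, 4);
//     uchar4 y   = (uchar4)(5, 6, 7, 8);
//     uint   acc = 0;
//     ARM_DOT(x, y, acc); // acc += 1*5 + 2*6 + 3*7 + 4*8 = 70
//
// The size-specific ARM_DOT1..ARM_DOT16 macros below build on this contract, or on plain
// multiply-accumulates when the dot8 extensions are not available.
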
#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)

#define ARM_DOT1(a, b, c) \
    ({ \
        ARM_DOT((VEC_DATA_TYPE(DATA_TYPE, 4))(a, (VEC_DATA_TYPE(DATA_TYPE, 3))0), (VEC_DATA_TYPE(DATA_TYPE, 4))(b, (VEC_DATA_TYPE(DATA_TYPE, 3))0), c); \
    })
#define ARM_DOT2(a, b, c) \
    ({ \
        ARM_DOT((VEC_DATA_TYPE(DATA_TYPE, 4))(a, (VEC_DATA_TYPE(DATA_TYPE, 2))0), (VEC_DATA_TYPE(DATA_TYPE, 4))(b, (VEC_DATA_TYPE(DATA_TYPE, 2))0), c); \
    })
#define ARM_DOT3(a, b, c) \
    ({ \
        ARM_DOT((VEC_DATA_TYPE(DATA_TYPE, 4))(a, (DATA_TYPE)0), (VEC_DATA_TYPE(DATA_TYPE, 4))(b, (DATA_TYPE)0), c); \
    })
#define ARM_DOT4(a, b, c) \
    ({ \
        ARM_DOT(a, b, c); \
    })
#define ARM_DOT8(a, b, c) \
    ({ \
        ARM_DOT4((a.lo), (b.lo), c); \
        ARM_DOT4((a.hi), (b.hi), c); \
    })
#define ARM_DOT16(a, b, c) \
    ({ \
        ARM_DOT8((a.lo), (b.lo), c); \
        ARM_DOT8((a.hi), (b.hi), c); \
    })

#else // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)

/** Specialized macros to perform the dot product instruction between two vectors of size K0 [1,16] without using the dot8 instruction. */
#define ARM_DOT1(a, b, c) \
    ({ \
        c += (ACC_DATA_TYPE)a * b; \
    })
#define ARM_DOT2(a, b, c) \
    ({ \
        c += (ACC_DATA_TYPE)a.s0 * b.s0; \
        c += (ACC_DATA_TYPE)a.s1 * b.s1; \
    })
#define ARM_DOT3(a, b, c) \
    ({ \
        ARM_DOT2(a, b, c); \
        c += (ACC_DATA_TYPE)a.s2 * b.s2; \
    })
#define ARM_DOT4(a, b, c) \
    ({ \
        ARM_DOT3(a, b, c); \
        c += (ACC_DATA_TYPE)a.s3 * b.s3; \
    })
#define ARM_DOT8(a, b, c) \
    ({ \
        ARM_DOT4((a.lo), (b.lo), c); \
        ARM_DOT4((a.hi), (b.hi), c); \
    })
#define ARM_DOT16(a, b, c) \
    ({ \
        ARM_DOT8((a.lo), (b.lo), c); \
        ARM_DOT8((a.hi), (b.hi), c); \
    })
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)

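// Expansion sketch for the size-specific helpers above, assuming DATA_TYPE=uchar and
// ACC_DATA_TYPE=uint: ARM_DOT4 consumes full 4-element vectors, ARM_DOT8/ARM_DOT16 split their
// inputs into .lo/.hi halves, and ARM_DOT1/2/3 either zero-pad to 4 elements (dot8 path) or
// fall back to scalar multiply-accumulates, so every ARM_DOT<K0> computes
//
//     c += a.s0 * b.s0 + a.s1 * b.s1 + ... + a.s<K0-1> * b.s<K0-1>
//
// for the given K0 in [1, 16].
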
/** Specialized macros to perform a broadcast dot product operation between one vector "a" and N0 vectors "b" of size K0 [1,16] */
#define ARM_DOT_K0X1(k0, a, b, c) \
    ({ \
        ARM_DOT_K0(k0, (a), (b##0), (c)); \
    })
#define ARM_DOT_K0X2(k0, a, b, c) \
    ({ \
        ARM_DOT_K0(k0, (a), (b##0), (c.s0)); \
        ARM_DOT_K0(k0, (a), (b##1), (c.s1)); \
    })
#define ARM_DOT_K0X3(k0, a, b, c) \
    ({ \
        ARM_DOT_K0X2(k0, a, b, c); \
        ARM_DOT_K0(k0, (a), (b##2), (c.s2)); \
    })
#define ARM_DOT_K0X4(k0, a, b, c) \
    ({ \
        ARM_DOT_K0X3(k0, a, b, c); \
        ARM_DOT_K0(k0, (a), (b##3), (c.s3)); \
    })
#define ARM_DOT_K0X8(k0, a, b, c) \
    ({ \
        ARM_DOT_K0X4(k0, a, b, c); \
        ARM_DOT_K0(k0, (a), (b##4), (c.s4)); \
        ARM_DOT_K0(k0, (a), (b##5), (c.s5)); \
        ARM_DOT_K0(k0, (a), (b##6), (c.s6)); \
        ARM_DOT_K0(k0, (a), (b##7), (c.s7)); \
    })
#define ARM_DOT_K0X16(k0, a, b, c) \
    ({ \
        ARM_DOT_K0X8(k0, a, b, c); \
        ARM_DOT_K0(k0, (a), (b##8), (c.s8)); \
        ARM_DOT_K0(k0, (a), (b##9), (c.s9)); \
        ARM_DOT_K0(k0, (a), (b##A), (c.sA)); \
        ARM_DOT_K0(k0, (a), (b##B), (c.sB)); \
        ARM_DOT_K0(k0, (a), (b##C), (c.sC)); \
        ARM_DOT_K0(k0, (a), (b##D), (c.sD)); \
        ARM_DOT_K0(k0, (a), (b##E), (c.sE)); \
        ARM_DOT_K0(k0, (a), (b##F), (c.sF)); \
    })

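// Expansion sketch, assuming N0=2 and K0=4: ARM_DOT_K0X2(4, a, b, c) accumulates one row of the
// output block, one dot product per RHS block row:
//
//     ARM_DOT_K0(4, (a), (b0), (c.s0)); // c.s0 += dot(a, b0)
//     ARM_DOT_K0(4, (a), (b1), (c.s1)); // c.s1 += dot(a, b1)
//
// where b0 and b1 are the K0-element vectors declared by LOAD_BLOCK in the kernels below.
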
/** Specialized macros to perform a partial matrix multiplication with dimensions M0,N0,K0 */
#define ARM_MM_K0XN0X1(n0, k0, a, b, c) \
    ({ \
        ARM_DOT_K0XN0(n0, k0, (a##0), b, (c##0)); \
    })
#define ARM_MM_K0XN0X2(n0, k0, a, b, c) \
    ({ \
        ARM_MM_K0XN0X1(n0, k0, a, b, c); \
        ARM_DOT_K0XN0(n0, k0, (a##1), b, (c##1)); \
    })
#define ARM_MM_K0XN0X3(n0, k0, a, b, c) \
    ({ \
        ARM_MM_K0XN0X2(n0, k0, a, b, c); \
        ARM_DOT_K0XN0(n0, k0, (a##2), b, (c##2)); \
    })
#define ARM_MM_K0XN0X4(n0, k0, a, b, c) \
    ({ \
        ARM_MM_K0XN0X3(n0, k0, a, b, c); \
        ARM_DOT_K0XN0(n0, k0, (a##3), b, (c##3)); \
    })
#define ARM_MM_K0XN0X5(n0, k0, a, b, c) \
    ({ \
        ARM_MM_K0XN0X4(n0, k0, a, b, c); \
        ARM_DOT_K0XN0(n0, k0, (a##4), b, (c##4)); \
    })
#define ARM_MM_K0XN0X6(n0, k0, a, b, c) \
    ({ \
        ARM_MM_K0XN0X5(n0, k0, a, b, c); \
        ARM_DOT_K0XN0(n0, k0, (a##5), b, (c##5)); \
    })
#define ARM_MM_K0XN0X7(n0, k0, a, b, c) \
    ({ \
        ARM_MM_K0XN0X6(n0, k0, a, b, c); \
        ARM_DOT_K0XN0(n0, k0, (a##6), b, (c##6)); \
    })
#define ARM_MM_K0XN0X8(n0, k0, a, b, c) \
    ({ \
        ARM_MM_K0XN0X7(n0, k0, a, b, c); \
        ARM_DOT_K0XN0(n0, k0, (a##7), b, (c##7)); \
    })

#define ARM_DOT_K0(k0, a, b, c) \
    ({ \
        CONCAT(ARM_DOT, k0) \
        ((a), (b), (c)); \
    })

#define ARM_DOT_K0XN0(n0, k0, a, b, c) \
    ({ \
        CONCAT(ARM_DOT_K0X, n0) \
        (k0, (a), b, (c)); \
    })

#define ARM_MM_K0XN0XM0(m0, n0, k0, a, b, c) \
    ({ \
        CONCAT(ARM_MM_K0XN0X, m0) \
        (n0, k0, a, b, c); \
    })

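// Expansion sketch for the dispatcher above, assuming M0=2, N0=2, K0=4:
//
//     ARM_MM_K0XN0XM0(2, 2, 4, a, b, c);
//
// unrolls into four dot products between the LHS rows a0, a1 and the (transposed) RHS rows
// b0, b1, accumulated into the ACC_DATA_TYPE rows c0, c1:
//
//     c0.s0 += dot(a0, b0); c0.s1 += dot(a0, b1);
//     c1.s0 += dot(a1, b0); c1.s1 += dot(a1, b1);
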
/** Specialized macros to perform a broadcast multiply-accumulate operation between one vector "a" of size K0 [1,16] and K0 vectors "b" of size N0 */
#define ARM_MUL_N0X1(VECTOR_ACC_TYPE, a, b, c) \
    ({ \
        c += CONVERT(b##0, VECTOR_ACC_TYPE) * a; \
    })
#define ARM_MUL_N0X2(VECTOR_ACC_TYPE, a, b, c) \
    ({ \
        c += CONVERT(b##0, VECTOR_ACC_TYPE) * a.s##0; \
        c += CONVERT(b##1, VECTOR_ACC_TYPE) * a.s##1; \
    })
#define ARM_MUL_N0X3(VECTOR_ACC_TYPE, a, b, c) \
    ({ \
        ARM_MUL_N0X2(VECTOR_ACC_TYPE, a, b, c); \
        c += CONVERT(b##2, VECTOR_ACC_TYPE) * a.s##2; \
    })
#define ARM_MUL_N0X4(VECTOR_ACC_TYPE, a, b, c) \
    ({ \
        ARM_MUL_N0X3(VECTOR_ACC_TYPE, a, b, c); \
        c += CONVERT(b##3, VECTOR_ACC_TYPE) * a.s##3; \
    })
#define ARM_MUL_N0X8(VECTOR_ACC_TYPE, a, b, c) \
    ({ \
        ARM_MUL_N0X4(VECTOR_ACC_TYPE, a, b, c); \
        c += CONVERT(b##4, VECTOR_ACC_TYPE) * a.s##4; \
        c += CONVERT(b##5, VECTOR_ACC_TYPE) * a.s##5; \
        c += CONVERT(b##6, VECTOR_ACC_TYPE) * a.s##6; \
        c += CONVERT(b##7, VECTOR_ACC_TYPE) * a.s##7; \
    })
#define ARM_MUL_N0X16(VECTOR_ACC_TYPE, a, b, c) \
    ({ \
        ARM_MUL_N0X8(VECTOR_ACC_TYPE, a, b, c); \
        c += CONVERT(b##8, VECTOR_ACC_TYPE) * a.s##8; \
        c += CONVERT(b##9, VECTOR_ACC_TYPE) * a.s##9; \
        c += CONVERT(b##A, VECTOR_ACC_TYPE) * a.s##A; \
        c += CONVERT(b##B, VECTOR_ACC_TYPE) * a.s##B; \
        c += CONVERT(b##C, VECTOR_ACC_TYPE) * a.s##C; \
        c += CONVERT(b##D, VECTOR_ACC_TYPE) * a.s##D; \
        c += CONVERT(b##E, VECTOR_ACC_TYPE) * a.s##E; \
        c += CONVERT(b##F, VECTOR_ACC_TYPE) * a.s##F; \
    })
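// Expansion sketch for ARM_MUL_N0X2, assuming VECTOR_ACC_TYPE=uint4 (N0=4) and K0=2: each scalar
// element of "a" scales a whole N0-wide vector of B and accumulates,
//
//     c += CONVERT(b0, uint4) * a.s0;
//     c += CONVERT(b1, uint4) * a.s1;
//
// which yields the same per-element result as the dot-product formulation above, organised so
// that each b<i> holds N0 output columns for a single k index.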
/** Specialized macros to perform a partial matrix multiplication with dimensions M0,N0,K0 */
#define ARM_MM_NATIVE_N0XK0X1(VECTOR_ACC_TYPE, k0, a, b, c) \
    ({ \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##0), b, (c##0)); \
    })
#define ARM_MM_NATIVE_N0XK0X2(VECTOR_ACC_TYPE, k0, a, b, c) \
    ({ \
        ARM_MM_NATIVE_N0XK0X1(VECTOR_ACC_TYPE, k0, a, b, c); \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##1), b, (c##1)); \
    })
#define ARM_MM_NATIVE_N0XK0X3(VECTOR_ACC_TYPE, k0, a, b, c) \
    ({ \
        ARM_MM_NATIVE_N0XK0X2(VECTOR_ACC_TYPE, k0, a, b, c); \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##2), b, (c##2)); \
    })
#define ARM_MM_NATIVE_N0XK0X4(VECTOR_ACC_TYPE, k0, a, b, c) \
    ({ \
        ARM_MM_NATIVE_N0XK0X3(VECTOR_ACC_TYPE, k0, a, b, c); \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##3), b, (c##3)); \
    })
#define ARM_MM_NATIVE_N0XK0X5(VECTOR_ACC_TYPE, k0, a, b, c) \
    ({ \
        ARM_MM_NATIVE_N0XK0X4(VECTOR_ACC_TYPE, k0, a, b, c); \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##4), b, (c##4)); \
    })
#define ARM_MM_NATIVE_N0XK0X6(VECTOR_ACC_TYPE, k0, a, b, c) \
    ({ \
        ARM_MM_NATIVE_N0XK0X5(VECTOR_ACC_TYPE, k0, a, b, c); \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##5), b, (c##5)); \
    })
#define ARM_MM_NATIVE_N0XK0X7(VECTOR_ACC_TYPE, k0, a, b, c) \
    ({ \
        ARM_MM_NATIVE_N0XK0X6(VECTOR_ACC_TYPE, k0, a, b, c); \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##6), b, (c##6)); \
    })
#define ARM_MM_NATIVE_N0XK0X8(VECTOR_ACC_TYPE, k0, a, b, c) \
    ({ \
        ARM_MM_NATIVE_N0XK0X7(VECTOR_ACC_TYPE, k0, a, b, c); \
        ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##7), b, (c##7)); \
    })
#define ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, a, b, c) \
    ({ \
        CONCAT(ARM_MUL_N0X, k0) \
        (VECTOR_ACC_TYPE, (a), b, (c)); \
    })
#define ARM_MM_NATIVE_N0XK0XM0(VECTOR_ACC_TYPE, m0, k0, a, b, c) \
    ({ \
        CONCAT(ARM_MM_NATIVE_N0XK0X, m0) \
        (VECTOR_ACC_TYPE, k0, a, b, c); \
    })

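// Dispatch sketch: ARM_MM_NATIVE_N0XK0XM0(VECTOR_ACC_TYPE, m0, k0, a, b, c) applies ARM_MUL_N0XK0
// to each of the m0 LHS rows a0..a<m0-1>, accumulating into c0..c<m0-1>; for example, assuming
// uint4 accumulators, M0=2 and K0=2:
//
//     ARM_MM_NATIVE_N0XK0XM0(uint4, 2, 2, a, b, c);
//     // c0 += CONVERT(b0, uint4) * a0.s0 + CONVERT(b1, uint4) * a0.s1;
//     // c1 += CONVERT(b0, uint4) * a1.s0 + CONVERT(b1, uint4) * a1.s1;
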
#if defined(M0) && defined(N0) && defined(K0) && defined(V0) && defined(H0) && defined(M) && defined(N) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
/** This OpenCL kernel computes the matrix multiplication between 2 matrices with QASYMM8/QASYMM8_SIGNED data type.
 * The LHS matrix must be reshaped with @ref CLGEMMReshapeLHSMatrixKernel and the M0xK0 blocks must NOT be transposed
 * The RHS matrix must be reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the K0xN0 blocks must be transposed
 *
 * @note The input data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=uchar)
 * @note The accumulator data type must be passed at compile time using -DACC_DATA_TYPE (i.e. -DACC_DATA_TYPE=uint)
 * @note If the first two dimensions of NDRange have been dispatched with "dummy_work_items" support, the option -DDUMMY_WORK_ITEMS must be passed at compile time.
 * @note The GEMM's dimensions M and N must be passed at compile time using -DM and -DN (i.e. -DM=52 and -DN=90).
 * @note The block's dimensions used for reshaping the LHS matrix and the RHS matrix (M0, N0 and K0) must be passed at compile time using -DM0, -DN0 and -DK0 (i.e. -DM0=4, -DN0=8, -DK0=4).
 * @note The number of M0xK0 vertical blocks stored on the same output row of the reshaped LHS matrix must be passed at compile time using -DV0 (i.e. -DV0=2)
 * @note The number of K0xN0 horizontal blocks stored on the same output row of the reshaped RHS matrix must be passed at compile time using -DH0 (i.e. -DH0=2)
 * @note If the M0xK0 blocks in the reshaped LHS matrix have been interleaved, the option -DLHS_INTERLEAVE must be passed at compile time.
 * @note If the K0xN0 blocks in the reshaped RHS matrix have been interleaved, the option -DRHS_INTERLEAVE must be passed at compile time.
 * @note Only the following configurations of M0, N0 and K0 are currently supported:
 *  - M0 = 2, 3, 4, 5, 6, 7, 8
 *  - N0 = 2, 3, 4, 8, 16
 *  - K0 = 2, 3, 4, 8, 16
 *  - V0 >= 1
 *  - H0 >= 1
 *
 * @note In case the output has to be reinterpreted as a 3D tensor (i.e. output of convolution layer), the following information must be passed at compile time:
 *       -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
 *       -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
 *       -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
 *          (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns LHS matrix NOT reshaped
 *
 * @param[in]  lhs_ptr                           Pointer to the LHS reshaped matrix. Supported data type: QASYMM8/QASYMM8_SIGNED
 * @param[in]  lhs_stride_x                      Stride of the LHS reshaped matrix in X dimension (in bytes)
 * @param[in]  lhs_step_x                        lhs_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  lhs_stride_y                      Stride of the LHS reshaped matrix in Y dimension (in bytes)
 * @param[in]  lhs_step_y                        lhs_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  lhs_offset_first_element_in_bytes The offset of the first element in the LHS reshaped matrix
 * @param[in]  rhs_ptr                           Pointer to the RHS reshaped matrix. Supported data type: same as @p lhs_ptr
 * @param[in]  rhs_stride_x                      Stride of the RHS reshaped matrix in X dimension (in bytes)
 * @param[in]  rhs_step_x                        rhs_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  rhs_stride_y                      Stride of the RHS reshaped matrix in Y dimension (in bytes)
 * @param[in]  rhs_step_y                        rhs_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  rhs_offset_first_element_in_bytes The offset of the first element in the RHS reshaped matrix
 * @param[out] dst_ptr                           Pointer to the destination matrix. Supported data type: S32
 * @param[in]  dst_stride_x                      Stride of the destination matrix in X dimension (in bytes)
 * @param[in]  dst_step_x                        dst_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  dst_stride_y                      Stride of the destination matrix in Y dimension (in bytes)
 * @param[in]  dst_step_y                        dst_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
 * @param[in]  k                                 Number of columns in LHS matrix and rows in RHS matrix not reshaped.
 * @param[in]  lhs_stride_z                      Stride of the LHS reshaped matrix in Z dimension (in bytes)
 * @param[in]  rhs_stride_z                      Stride of the RHS reshaped matrix in Z dimension (in bytes)
 * @param[in]  dst_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
 * @param[in]  dst_cross_plane_pad               (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
 */
__kernel void gemmlowp_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs),
                                                IMAGE_DECLARATION(rhs),
                                                IMAGE_DECLARATION(dst),
                                                uint k,
                                                uint lhs_stride_z,
                                                uint rhs_stride_z,
                                                uint dst_stride_z
#if defined(REINTERPRET_OUTPUT_AS_3D)
                                                ,
                                                uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
                                               )
{
    // Block size
#define LHS_BLOCK_SIZE ((K0) * (M0))

#if defined(LHS_INTERLEAVE)
#define LHS_OFFSET_X (K0)
#define LHS_STEP_X ((K0) * (V0))
#define LHS_STEP_LOOP (1)
#else // defined(LHS_INTERLEAVE)
#define LHS_OFFSET_X (LHS_BLOCK_SIZE)
#define LHS_STEP_X (K0)
#define LHS_STEP_LOOP (V0)
#endif // defined(LHS_INTERLEAVE)

    // Block size
#define RHS_BLOCK_SIZE ((K0) * (N0))

    // RHS offset and step X
#if defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (K0)
#define RHS_STEP_X ((K0) * (H0))
#define RHS_STEP_LOOP (1)
#else // defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (RHS_BLOCK_SIZE)
#define RHS_STEP_X (K0)
#define RHS_STEP_LOOP (H0)
#endif // defined(RHS_INTERLEAVE)

    uint x = get_global_id(0);
    uint y = get_global_id(1);
    uint z = get_global_id(2);

#if defined(DUMMY_WORK_ITEMS)
    if((x * N0 >= N) || (y * M0 >= M))
    {
        return;
    }
#endif // defined(DUMMY_WORK_ITEMS)

    // Compute LHS matrix address
    __global DATA_TYPE *lhs_addr = (__global DATA_TYPE *)(lhs_ptr + lhs_offset_first_element_in_bytes + (y % V0) * (uint)LHS_OFFSET_X + (y / V0) * (uint)lhs_stride_y + (z * lhs_stride_z));

    // Compute RHS matrix address
    __global DATA_TYPE *rhs_addr = (__global DATA_TYPE *)(rhs_ptr + rhs_offset_first_element_in_bytes + (x % H0) * (uint)RHS_OFFSET_X + (x / (uint)H0) * rhs_stride_y);

#if defined(MATRIX_B_DEPTH)
    // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
    rhs_addr += (z % MATRIX_B_DEPTH) * rhs_stride_z;
#else // defined(MATRIX_B_DEPTH)
    rhs_addr += z * rhs_stride_z;
#endif // defined(MATRIX_B_DEPTH)

    REPEAT_VAR_INIT_TO_CONST(8, uint, zlhs, 0); //uint zlhs0=0,zlhs1=0,zlhs2=0,... zlhs7=0;
    REPEAT_VAR_INIT_TO_CONST(16, uint, zrhs, 0);

    // Initialize the accumulators
    REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(ACC_DATA_TYPE, N0), c, 0); //VEC_DATA_TYPE(ACC_DATA_TYPE, N0) c0=0,c1=0,c2=0,... c(M0-1)=0;

    for(int i = 0; i < k; i += K0)
    {
        // Load values from LHS matrix
        LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_addr, 0, LHS_STEP_X, zlhs);

        // Load values from RHS matrix
        LOAD_BLOCK(N0, K0, DATA_TYPE, b, rhs_addr, 0, RHS_STEP_X, zrhs);

        // Partial matrix multiplication M0,N0,K0
        ARM_MM_K0XN0XM0(M0, N0, K0, a, b, c);

        // Update address
        lhs_addr += (M0 * LHS_STEP_X * LHS_STEP_LOOP);
        rhs_addr += (N0 * RHS_STEP_X * RHS_STEP_LOOP);
    }

    __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(int)) + (y * (uint)M0 * dst_stride_y);

    REPEAT_VAR_INIT_TO_CONST(8, uint, zout, 0); //uint zout0=0,zout1=0,zout2=0,... zout7=0;

#if defined(REINTERPRET_OUTPUT_AS_3D)
    // The plane (zout) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
    CALCULATE_Z_OFFSET(M0, uint, zout, y * M0, HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y);

    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
    // multiply dst_stride_z by DEPTH_GEMM3D
    dst_addr += z * dst_stride_z * DEPTH_GEMM3D;

#else // defined(REINTERPRET_OUTPUT_AS_3D)

    // Add offset for batched GEMM
    dst_addr += z * dst_stride_z;

#endif // defined(REINTERPRET_OUTPUT_AS_3D)

    // Convert and store output block
    const bool cond_y = ((get_global_id(1) + 1) * M0 >= M);
    const bool cond_x = ((get_global_id(0) + 1) * N0 >= N);

    // Store output block
    REPEAT_VAR_INIT_CONVERT_SAT(M0, VEC_DATA_TYPE(int, N0), c, c_lp);
    STORE_BLOCK_BOUNDARY_AWARE(M0, N0, int, c_lp, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);

#undef LHS_BLOCK_SIZE
#undef LHS_OFFSET_X
#undef LHS_STEP_X
#undef RHS_BLOCK_SIZE
#undef RHS_OFFSET_X
#undef RHS_STEP_X
}
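
// Illustrative build options for gemmlowp_mm_reshaped_lhs_nt_rhs_t, assuming an unsigned 8-bit
// GEMM with M=52 and N=90 and the example block sizes from the documentation above:
//
//     -DDATA_TYPE=uchar -DACC_DATA_TYPE=uint -DM=52 -DN=90
//     -DM0=4 -DN0=8 -DK0=4 -DV0=2 -DH0=2
//     -DPARTIAL_STORE_M0=0 -DPARTIAL_STORE_N0=2 // assumed to be the leftover block sizes M % M0 and N % N0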
#endif // defined(M0) && defined(N0) && defined(K0) && defined(V0) && defined(H0) && defined(M) && defined(N) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)

#if defined(M0) && defined(N0) && defined(K0) && defined(H0) && defined(K) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)

/** This OpenCL kernel computes the matrix multiplication between 2 matrices.
 * The LHS matrix is NOT reshaped
 * The RHS matrix is reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the block K0xN0 is transposed
 *
 * @note The input data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=uchar)
 * @note The accumulator data type must be passed at compile time using -DACC_DATA_TYPE (i.e. -DACC_DATA_TYPE=uint)
 * @note The number of columns of LHS matrix must be passed at compile time using -DK (i.e. -DK=64)
 * @note The block's dimensions used for reshaping the RHS matrix (N0 and K0) must be passed at compile time using -DN0 and -DK0 (i.e. -DN0=8, -DK0=4).
 * @note The number of M0 rows to process must be passed at compile time using -DM0 (i.e. -DM0=2)
 * @note The number of K0xN0 horizontal blocks stored on the same output row of the reshaped RHS matrix must be passed at compile time using -DH0 (i.e. -DH0=2)
 * @note If the K0xN0 blocks in the reshaped RHS matrix have been interleaved, the option -DRHS_INTERLEAVE must be passed at compile time.
 * @note Only the following configurations of M0, N0 and K0 are currently supported:
 *  - M0 = 1, 2, 3, 4, 5, 6, 7, 8
 *  - N0 = 2, 3, 4, 8, 16
 *  - K0 = 2, 3, 4, 8, 16
 *  - H0 >= 1
 *
 * @note In case the input or output have to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
 *       -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
 *       -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
 *       -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
 *       -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
 *          (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns LHS matrix
 *
 * @param[in]  lhs_ptr                           Pointer to the LHS matrix. Supported data type: QASYMM8/QASYMM8_SIGNED
 * @param[in]  lhs_stride_x                      Stride of the LHS matrix in X dimension (in bytes)
 * @param[in]  lhs_step_x                        lhs_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  lhs_stride_y                      Stride of the LHS matrix in Y dimension (in bytes)
 * @param[in]  lhs_step_y                        lhs_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  lhs_offset_first_element_in_bytes The offset of the first element in the LHS matrix
 * @param[in]  rhs_ptr                           Pointer to the RHS reshaped matrix. Supported data type: same as @p lhs_ptr
 * @param[in]  rhs_stride_x                      Stride of the RHS reshaped matrix in X dimension (in bytes)
 * @param[in]  rhs_step_x                        rhs_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  rhs_stride_y                      Stride of the RHS reshaped matrix in Y dimension (in bytes)
 * @param[in]  rhs_step_y                        rhs_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  rhs_offset_first_element_in_bytes The offset of the first element in the RHS reshaped matrix
 * @param[out] dst_ptr                           Pointer to the destination matrix. Supported data type: S32
 * @param[in]  dst_stride_x                      Stride of the destination matrix in X dimension (in bytes)
 * @param[in]  dst_step_x                        dst_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  dst_stride_y                      Stride of the destination matrix in Y dimension (in bytes)
 * @param[in]  dst_step_y                        dst_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
 * @param[in]  lhs_stride_z                      Stride of the LHS matrix in Z dimension (in bytes)
 * @param[in]  rhs_stride_z                      Stride of the RHS reshaped matrix in Z dimension (in bytes)
 * @param[in]  dst_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
 * @param[in]  lhs_cross_plane_pad               (Optional) Bottom paddings for LHS matrix in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
 * @param[in]  dst_cross_plane_pad               (Optional) Bottom paddings for the output matrix in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
 */
__kernel void gemmlowp_mm_reshaped_only_rhs_t(IMAGE_DECLARATION(lhs),
                                              IMAGE_DECLARATION(rhs),
                                              IMAGE_DECLARATION(dst),
                                              uint lhs_stride_z,
                                              uint rhs_stride_z,
                                              uint dst_stride_z
#if defined(REINTERPRET_INPUT_AS_3D)
                                              ,
                                              uint lhs_cross_plane_pad
#endif // REINTERPRET_INPUT_AS_3D
#if defined(REINTERPRET_OUTPUT_AS_3D)
                                              ,
                                              uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
                                             )
{
    // Block size
#define RHS_BLOCK_SIZE ((K0) * (N0))

    // RHS offset and step X
#if defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (K0)
#define RHS_STEP_X ((K0) * (H0))
#define RHS_STEP_LOOP (1)
#else // defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (RHS_BLOCK_SIZE)
#define RHS_STEP_X (K0)
#define RHS_STEP_LOOP (H0)
#endif // defined(RHS_INTERLEAVE)

    uint x = get_global_id(0);
    uint y = get_global_id(1);
    uint z = get_global_id(2);

#if defined(DUMMY_WORK_ITEMS)
    if((x * N0 >= N) || (y * M0 >= M))
    {
        return;
    }
#endif // defined(DUMMY_WORK_ITEMS)

    // Compute LHS matrix address
    uint lhs_offset = lhs_offset_first_element_in_bytes + COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * (uint)lhs_stride_y;

    // Compute RHS matrix address
    uint rhs_offset = rhs_offset_first_element_in_bytes + (x % H0) * (uint)RHS_OFFSET_X + (x / (uint)H0) * rhs_stride_y;

#if defined(MATRIX_B_DEPTH)
    // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
    rhs_offset += (z % MATRIX_B_DEPTH) * rhs_stride_z;
#else // defined(MATRIX_B_DEPTH)
    rhs_offset += z * rhs_stride_z;
#endif // defined(MATRIX_B_DEPTH)

    REPEAT_VAR_INIT_TO_CONST(8, uint, zlhs, 0); //uint zlhs0=0,zlhs1=0,zlhs2=0,... zlhs7=0;
    REPEAT_VAR_INIT_TO_CONST(16, uint, zrhs, 0);

#if defined(REINTERPRET_INPUT_AS_3D)
    // The plane (zlhs) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
    CALCULATE_Z_OFFSET(M0, uint, zlhs, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, lhs_cross_plane_pad, lhs_stride_y);

    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
    // multiply lhs_stride_z by DEPTH_GEMM3D
    lhs_offset += z * lhs_stride_z * DEPTH_GEMM3D;

#else // defined(REINTERPRET_INPUT_AS_3D)

    // Add offset for batched GEMM
    lhs_offset += z * lhs_stride_z;

#endif // defined(REINTERPRET_INPUT_AS_3D)

    // Initialize the accumulators
    REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(ACC_DATA_TYPE, N0), c, 0); //VEC_DATA_TYPE(ACC_DATA_TYPE, N0) c0=0,c1=0,c2=0,... c(M0-1)=0;

    int i = 0;
    for(; i <= (K - K0); i += K0)
    {
        // Load values from LHS matrix
        LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs);

        // Load values from RHS matrix
        LOAD_BLOCK(N0, K0, DATA_TYPE, b, rhs_ptr, rhs_offset, RHS_STEP_X, zrhs);

        // Partial matrix multiplication M0,N0,K0
        ARM_MM_K0XN0XM0(M0, N0, K0, a, b, c);

        lhs_offset += K0;
        rhs_offset += N0 * RHS_STEP_X * RHS_STEP_LOOP;
    }
    // Left-over accumulations
    for(; i < K; ++i)
    {
        // Load values from LHS matrix
        LOAD_BLOCK(M0, 1, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs);

        // Load values from RHS reshaped matrix
        LOAD_BLOCK(N0, 1, DATA_TYPE, b, rhs_ptr, rhs_offset, RHS_STEP_X, zrhs);

        ARM_MM_K0XN0XM0(M0, N0, 1, a, b, c);
        lhs_offset += 1;
        rhs_offset += 1;
    }
    __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(int)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * dst_stride_y);

    REPEAT_VAR_INIT_TO_CONST(8, uint, zout, 0); //uint zout0=0,zout1=0,zout2=0,... zout7=0;

#if defined(REINTERPRET_OUTPUT_AS_3D)
    // The plane (zout) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
    CALCULATE_Z_OFFSET(M0, uint, zout, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y);

    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
    // multiply dst_stride_z by DEPTH_GEMM3D
    dst_addr += z * dst_stride_z * DEPTH_GEMM3D;

#else // defined(REINTERPRET_OUTPUT_AS_3D)

    // Add offset for batched GEMM
    dst_addr += z * dst_stride_z;

#endif // defined(REINTERPRET_OUTPUT_AS_3D)

    // Convert and store output block
    const bool cond_y = y == 0;
    const bool cond_x = ((x + 1) * N0 >= N);

    // Store output block
    REPEAT_VAR_INIT_CONVERT_SAT(M0, VEC_DATA_TYPE(int, N0), c, c_lp);
    STORE_BLOCK_BOUNDARY_AWARE(M0, N0, int, c_lp, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);

#undef RHS_BLOCK_SIZE
#undef RHS_OFFSET_X
#undef RHS_STEP_X
}
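
// Illustrative trip count for the K-loops above, assuming K=70 and K0=16: the first loop runs for
// i = 0, 16, 32, 48 (four full K0-wide accumulations) and the left-over loop finishes the
// remaining 6 columns one element at a time, so K does not need to be a multiple of K0.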

#if defined(RESULT_OFFSET) && defined(RESULT_SHIFT) && defined(RESULT_MULTIPLIER)
/** This OpenCL kernel computes the matrix multiplication between 2 matrices with fused output stage using fixed-point arithmetic.
 * The LHS matrix is NOT reshaped
 * The RHS matrix is reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the block K0xN0 is transposed
 *
 * @note The input data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=uchar)
 * @note The accumulator data type must be passed at compile time using -DACC_DATA_TYPE (i.e. -DACC_DATA_TYPE=uint)
 * @note The number of columns of LHS matrix must be passed at compile time using -DK (i.e. -DK=64)
 * @note The block's dimensions used for reshaping the RHS matrix (N0 and K0) must be passed at compile time using -DN0 and -DK0 (i.e. -DN0=8, -DK0=4).
 * @note The number of M0 rows to process must be passed at compile time using -DM0 (i.e. -DM0=2)
 * @note The number of K0xN0 horizontal blocks stored on the same output row of the reshaped RHS matrix must be passed at compile time using -DH0 (i.e. -DH0=2)
 * @note If the K0xN0 blocks in the reshaped RHS matrix have been interleaved, the option -DRHS_INTERLEAVE must be passed at compile time.
 * @note Only the following configurations of M0, N0 and K0 are currently supported:
 *  - M0 = 1, 2, 3, 4, 5, 6, 7, 8
 *  - N0 = 2, 3, 4, 8, 16
 *  - K0 = 2, 3, 4, 8, 16
 *  - H0 >= 1
 *
 * @note In case the input or output have to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
 *       -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
 *       -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
 *       -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
 *       -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
 *          (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns LHS matrix
 *
 * @note The offset, scalar scale factor and number of bits to shift right of output tensor must be passed at compile time using -DRESULT_OFFSET, -DRESULT_MULTIPLIER and -DRESULT_SHIFT
 * @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
 * @note The output datatype should be passed at compile time using -DOUTPUT_DATA_TYPE
 * @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
 *       These values can be used to implement "rectified linear unit" activation functions
 * @note In case of per-channel quantization of matrix B, -DPER_CHANNEL_QUANTIZATION must be passed at compile time.
 *
 * @param[in]  lhs_ptr                                          Pointer to the LHS matrix. Supported data type: QASYMM8/QASYMM8_SIGNED
 * @param[in]  lhs_stride_x                                     Stride of the LHS matrix in X dimension (in bytes)
 * @param[in]  lhs_step_x                                       lhs_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  lhs_stride_y                                     Stride of the LHS matrix in Y dimension (in bytes)
 * @param[in]  lhs_step_y                                       lhs_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  lhs_offset_first_element_in_bytes                The offset of the first element in the LHS matrix
 * @param[in]  rhs_ptr                                          Pointer to the RHS reshaped matrix. Supported data type: same as @p lhs_ptr
 * @param[in]  rhs_stride_x                                     Stride of the RHS reshaped matrix in X dimension (in bytes)
 * @param[in]  rhs_step_x                                       rhs_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  rhs_stride_y                                     Stride of the RHS reshaped matrix in Y dimension (in bytes)
 * @param[in]  rhs_step_y                                       rhs_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  rhs_offset_first_element_in_bytes                The offset of the first element in the RHS reshaped matrix
 * @param[out] dst_ptr                                          Pointer to the destination matrix. Supported data type: same as @p lhs_ptr
 * @param[in]  dst_stride_x                                     Stride of the destination matrix in X dimension (in bytes)
 * @param[in]  dst_step_x                                       dst_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  dst_stride_y                                     Stride of the destination matrix in Y dimension (in bytes)
 * @param[in]  dst_step_y                                       dst_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  dst_offset_first_element_in_bytes                The offset of the first element in the destination matrix
 * @param[in]  lhs_stride_z                                     Stride of the LHS matrix in Z dimension (in bytes)
 * @param[in]  rhs_stride_z                                     Stride of the RHS reshaped matrix in Z dimension (in bytes)
 * @param[in]  dst_stride_z                                     Stride of the destination tensor in Z dimension (in bytes)
 * @param[in]  lhs_cross_plane_pad                              (Optional) Bottom paddings for LHS matrix in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
 * @param[in]  dst_cross_plane_pad                              (Optional) Bottom paddings for the output matrix in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
 * @param[in]  sum_col_ptr                                      (Optional) Pointer to the source tensor. Supported data type: S32
 * @param[in]  sum_col_stride_x                                 (Optional) Stride of the source tensor in X dimension (in bytes)
 * @param[in]  sum_col_step_x                                   (Optional) sum_col_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  sum_col_stride_y                                 (Optional) Stride of the source tensor in Y dimension (in bytes)
 * @param[in]  sum_col_step_y                                   (Optional) sum_col_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  sum_col_offset_first_element_in_bytes            (Optional) The offset of the first element in the source tensor
 * @param[in]  sum_row_ptr                                      (Optional) Pointer to the source tensor. Supported data type: S32
 * @param[in]  sum_row_stride_x                                 (Optional) Stride of the source tensor in X dimension (in bytes)
 * @param[in]  sum_row_step_x                                   (Optional) sum_row_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  sum_row_stride_y                                 (Optional) Stride of the source tensor in Y dimension (in bytes)
 * @param[in]  sum_row_step_y                                   (Optional) sum_row_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  sum_row_offset_first_element_in_bytes            (Optional) The offset of the first element in the source tensor
 * @param[in]  biases_ptr                                       (Optional) Pointer to the biases tensor. Supported data type: S32
 * @param[in]  biases_stride_x                                  (Optional) Stride of the biases tensor in X dimension (in bytes)
 * @param[in]  biases_step_x                                    (Optional) biases_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  biases_offset_first_element_in_bytes             (Optional) The offset of the first element in the biases tensor
 * @param[in]  result_multipliers_ptr                           (Optional) Pointer to the output multipliers vector for per-channel quantization. Supported data types: S32
 * @param[in]  result_multipliers_stride_x                      (Optional) Stride of the output multipliers vector in X dimension (in bytes)
 * @param[in]  result_multipliers_step_x                        (Optional) output_multipliers_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  result_multipliers_offset_first_element_in_bytes (Optional) The offset of the first element in the output multipliers vector
 * @param[in]  result_shifts_ptr                                (Optional) Pointer to the output shifts vector for per-channel quantization. Supported data types: S32
 * @param[in]  result_shifts_stride_x                           (Optional) Stride of the output shifts vector in X dimension (in bytes)
 * @param[in]  result_shifts_step_x                             (Optional) output_shifts_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  result_shifts_offset_first_element_in_bytes      (Optional) The offset of the first element in the output shifts vector
 */
__kernel void gemmlowp_mm_reshaped_only_rhs_t_fused_output_stage_fixedpoint(IMAGE_DECLARATION(lhs),
                                                                            IMAGE_DECLARATION(rhs),
                                                                            IMAGE_DECLARATION(dst),
                                                                            uint lhs_stride_z,
                                                                            uint rhs_stride_z,
                                                                            uint dst_stride_z
#if defined(REINTERPRET_INPUT_AS_3D)
                                                                            ,
                                                                            uint lhs_cross_plane_pad
#endif // REINTERPRET_INPUT_AS_3D
#if defined(REINTERPRET_OUTPUT_AS_3D)
                                                                            ,
                                                                            uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
#if defined(A_OFFSET)
                                                                            ,
                                                                            IMAGE_DECLARATION(sum_col)
#endif // defined(A_OFFSET)
#if defined(B_OFFSET)
                                                                            ,
                                                                            IMAGE_DECLARATION(sum_row)
#endif // defined(B_OFFSET)
#if defined(ADD_BIAS)
                                                                            ,
                                                                            VECTOR_DECLARATION(biases)
#endif // defined(ADD_BIAS)
#if defined(PER_CHANNEL_QUANTIZATION)
                                                                            ,
                                                                            VECTOR_DECLARATION(result_multipliers),
                                                                            VECTOR_DECLARATION(result_shifts)
#endif // defined(PER_CHANNEL_QUANTIZATION)
                                                                           )
{
    // Block size
#define RHS_BLOCK_SIZE ((K0) * (N0))

    // RHS offset and step X
#if defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (K0)
#define RHS_STEP_X ((K0) * (H0))
#define RHS_STEP_LOOP (1)
#else // defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (RHS_BLOCK_SIZE)
#define RHS_STEP_X (K0)
#define RHS_STEP_LOOP (H0)
#endif // defined(RHS_INTERLEAVE)

    uint x = get_global_id(0);
    uint y = get_global_id(1);
    uint z = get_global_id(2);

#if defined(DUMMY_WORK_ITEMS)
    if((x * N0 >= N) || (y * M0 >= M))
    {
        return;
    }
#endif // defined(DUMMY_WORK_ITEMS)

    // Compute LHS matrix address
    uint lhs_offset = lhs_offset_first_element_in_bytes + COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * (uint)lhs_stride_y;

    // Compute RHS matrix address
    uint rhs_offset = rhs_offset_first_element_in_bytes + (x % H0) * (uint)RHS_OFFSET_X + (x / (uint)H0) * rhs_stride_y;

#if defined(MATRIX_B_DEPTH)
    // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
    rhs_offset += (z % MATRIX_B_DEPTH) * rhs_stride_z;
#else // defined(MATRIX_B_DEPTH)
    rhs_offset += z * rhs_stride_z;
#endif // defined(MATRIX_B_DEPTH)

    REPEAT_VAR_INIT_TO_CONST(8, uint, zlhs, 0); //uint zlhs0=0,zlhs1=0,zlhs2=0,... zlhs7=0;
    REPEAT_VAR_INIT_TO_CONST(16, uint, zrhs, 0);

#if defined(REINTERPRET_INPUT_AS_3D)
    // The plane (zlhs) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
    CALCULATE_Z_OFFSET(M0, uint, zlhs, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, lhs_cross_plane_pad, lhs_stride_y);

    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
    // multiply lhs_stride_z by DEPTH_GEMM3D
    lhs_offset += z * lhs_stride_z * DEPTH_GEMM3D;

#else // defined(REINTERPRET_INPUT_AS_3D)

    // Add offset for batched GEMM
    lhs_offset += z * lhs_stride_z;

#endif // defined(REINTERPRET_INPUT_AS_3D)

    // Initialize the accumulators
    REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(ACC_DATA_TYPE, N0), c, 0); //VEC_DATA_TYPE(ACC_DATA_TYPE, N0) c0=0,c1=0,c2=0,... c(M0-1)=0;

    int i = 0;
    for(; i <= (K - K0); i += K0)
    {
        // Load values from LHS matrix
        LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs);

        // Load values from RHS matrix
        LOAD_BLOCK(N0, K0, DATA_TYPE, b, rhs_ptr, rhs_offset, RHS_STEP_X, zrhs);

        // Partial matrix multiplication M0,N0,K0
        ARM_MM_K0XN0XM0(M0, N0, K0, a, b, c);

        lhs_offset += K0;
        rhs_offset += N0 * RHS_STEP_X * RHS_STEP_LOOP;
    }
    // Left-over accumulations
    for(; i < K; ++i)
    {
        // Load values from LHS matrix
        LOAD_BLOCK(M0, 1, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs);

        // Load values from RHS reshaped matrix
        LOAD_BLOCK(N0, 1, DATA_TYPE, b, rhs_ptr, rhs_offset, RHS_STEP_X, zrhs);

        ARM_MM_K0XN0XM0(M0, N0, 1, a, b, c);
        lhs_offset += 1;
        rhs_offset += 1;
    }
    // Result of MM is of type DATA_TYPE
    __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * dst_stride_y);

    REPEAT_VAR_INIT_TO_CONST(8, uint, zout, 0); //uint zout0=0,zout1=0,zout2=0,... zout7=0;

#if defined(REINTERPRET_OUTPUT_AS_3D)
    // The plane (zout) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
    CALCULATE_Z_OFFSET(M0, uint, zout, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y);

    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
    // multiply dst_stride_z by DEPTH_GEMM3D
    dst_addr += z * dst_stride_z * DEPTH_GEMM3D;

#else // defined(REINTERPRET_OUTPUT_AS_3D)

    // Add offset for batched GEMM
    dst_addr += z * dst_stride_z;

#endif // defined(REINTERPRET_OUTPUT_AS_3D)

    // Convert result of matrix multiplication to S32
    REPEAT_VAR_INIT_CONVERT_SAT(M0, VEC_DATA_TYPE(int, N0), c, c_int);

    // Offset contribution: c += (A_OFFSET * sum_col) + (B_OFFSET * sum_row) + K_OFFSET;
    REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(int, N0), offset_s32_, K_OFFSET);

#if defined(A_OFFSET)
    // Compute the offset contribution due to A_OFFSET
    __global uchar *sum_col_addr = sum_col_ptr + sum_col_offset_first_element_in_bytes + (x * (uint)N0) * sizeof(int);

#if defined(SUM_COL_HAS_BATCHES)
    sum_col_addr += z * sum_col_stride_y;
#endif // defined(SUM_COL_HAS_BATCHES)
    VEC_DATA_TYPE(int, N0)
    a_offset_s32 = VLOAD(N0)(0, (__global int *)sum_col_addr);
    a_offset_s32 *= (VEC_DATA_TYPE(int, N0))A_OFFSET;

    REPEAT_ADD_VECTOR_TO_VAR(M0, offset_s32_, a_offset_s32);
#endif // defined(A_OFFSET)

#if defined(B_OFFSET)
    // Compute the offset contribution due to B_OFFSET
    // Note: The sum_row tensor is generated through CLGEMMLowpMatrixAReductionKernel which
    // does not introduce paddings. For this reason it is safe to access the tensor in this manner
    // without considering that the coordinate "y" could come from an input 3D tensor
    __global uchar *sum_row_addr = sum_row_ptr + sum_row_offset_first_element_in_bytes + (COMPUTE_M0_START_ROW(y, (uint)M0, PARTIAL_STORE_M0)) * sizeof(int) + z * sum_row_stride_y;

    LOAD_SCALAR_AS_VECTOR(M0, N0, int, b_offset_s32_, sum_row_addr, 0, sum_row_stride_x);

    REPEAT_MLA_VAR_WITH_CONST_VEC(M0, offset_s32_, b_offset_s32_, (VEC_DATA_TYPE(int, N0))B_OFFSET);
#endif // defined(B_OFFSET)

#if defined(ADD_BIAS)
    // Add bias
    __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + (x * (uint)N0) * sizeof(int);

    VEC_DATA_TYPE(int, N0)
    bias_values = VLOAD(N0)(0, (__global int *)bias_addr);
    REPEAT_ADD_VECTOR_TO_VAR(M0, offset_s32_, bias_values);
#endif // defined(ADD_BIAS)

    REPEAT_ADD_TWO_VARS(M0, c_int, offset_s32_);

    // Multiply by result_mult_int and shift
#if defined(PER_CHANNEL_QUANTIZATION)
    __global uchar *result_multipliers_addr = result_multipliers_ptr + result_multipliers_offset_first_element_in_bytes + (x * (uint)N0) * sizeof(int);
    __global uchar *result_shifts_addr      = result_shifts_ptr + result_shifts_offset_first_element_in_bytes + (x * (uint)N0) * sizeof(int);

    VEC_DATA_TYPE(int, N0)
    res_mul = VLOAD(N0)(0, (__global int *)result_multipliers_addr);
    VEC_DATA_TYPE(int, N0)
    res_shift = VLOAD(N0)(0, (__global int *)result_shifts_addr);

    REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_PER_CHANNEL(M0, N0, c_int, res_mul, res_shift);
#else // defined(PER_CHANNEL_QUANTIZATION)

#if RESULT_SHIFT < 0
    REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(M0, N0, c_int, RESULT_MULTIPLIER, RESULT_SHIFT);
#else // RESULT_SHIFT >= 0
    REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(M0, N0, c_int, RESULT_MULTIPLIER, RESULT_SHIFT);
#endif // RESULT_SHIFT < 0

#endif // defined(PER_CHANNEL_QUANTIZATION)

    // Add the offset terms to GEMM's result
    REPEAT_ADD_CONST_TO_VAR(M0, VEC_DATA_TYPE(int, N0), c_int, RESULT_OFFSET);

#if defined(MIN_BOUND)
    REPEAT_MAX_CONST_VAR(M0, VEC_DATA_TYPE(int, N0), c_int, MIN_BOUND);
#endif // defined(MIN_BOUND)
#if defined(MAX_BOUND)
    REPEAT_MIN_CONST_VAR(M0, VEC_DATA_TYPE(int, N0), c_int, MAX_BOUND);
#endif // defined(MAX_BOUND)

    // Convert and store output block
    const bool cond_y = y == 0;
    const bool cond_x = ((x + 1) * N0 >= N);

    // Store output block
    REPEAT_VAR_INIT_CONVERT_SAT(M0, VEC_DATA_TYPE(DATA_TYPE, N0), c_int, c_lp);
    STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, c_lp, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);

#undef RHS_BLOCK_SIZE
#undef RHS_OFFSET_X
#undef RHS_STEP_X
}
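
// Output-stage sketch for the kernel above (per output element, without per-channel quantization);
// the helper names are placeholders for the REPEAT_* macros used in the code:
//
//     acc  = c_int + K_OFFSET + A_OFFSET * sum_col[n] + B_OFFSET * sum_row[m] + bias[n];
//     acc  = multiply_by_quantized_multiplier(acc, RESULT_MULTIPLIER, RESULT_SHIFT);
//     acc += RESULT_OFFSET;
//     dst  = convert_to_DATA_TYPE_sat(clamp(acc, MIN_BOUND, MAX_BOUND));
//
// where the sum_col, sum_row and bias terms only contribute when A_OFFSET, B_OFFSET and ADD_BIAS
// are defined.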
#endif // defined(RESULT_OFFSET) && defined(RESULT_SHIFT) && defined(RESULT_MULTIPLIER)
#endif // defined(M0) && defined(N0) && defined(K0) && defined(H0) && defined(K) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)

#if defined(M0) && defined(N0) && defined(K0) && defined(K) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)

/** This OpenCL kernel computes the matrix multiplication between 2 matrices.
 * The LHS matrix is NOT reshaped
 * The RHS matrix is NOT reshaped
 *
 * @note The input data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=uchar)
 * @note The accumulator data type must be passed at compile time using -DACC_DATA_TYPE (i.e. -DACC_DATA_TYPE=uint)
 * @note The number of columns of LHS matrix must be passed at compile time using -DK (i.e. -DK=64)
 * @note The number of M0 rows to process must be passed at compile time using -DM0 (i.e. -DM0=2)
 * @note The number of N0 columns to process must be passed at compile time using -DN0 (i.e. -DN0=2)
 * @note The number of K0 partial accumulations must be passed at compile time using -DK0 (i.e., -DK0=2)
 * @note Only the following configurations of M0, N0 and K0 are currently supported:
 *  - M0 = 1, 2, 3, 4, 5, 6, 7, 8
 *  - N0 = 2, 3, 4, 8, 16
 *  - K0 = 2, 3, 4, 8, 16
 *
 * @note In case the input or output have to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
 *       -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
 *       -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
 *       -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
 *       -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
 *          (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns LHS matrix
 *
 * @param[in]  lhs_ptr                           Pointer to the LHS matrix. Supported data type: QASYMM8
 * @param[in]  lhs_stride_x                      Stride of the LHS matrix in X dimension (in bytes)
 * @param[in]  lhs_step_x                        lhs_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  lhs_stride_y                      Stride of the LHS matrix in Y dimension (in bytes)
 * @param[in]  lhs_step_y                        lhs_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  lhs_offset_first_element_in_bytes The offset of the first element in the LHS matrix
 * @param[in]  rhs_ptr                           Pointer to the RHS matrix. Supported data type: same as @p lhs_ptr
 * @param[in]  rhs_stride_x                      Stride of the RHS matrix in X dimension (in bytes)
 * @param[in]  rhs_step_x                        rhs_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  rhs_stride_y                      Stride of the RHS matrix in Y dimension (in bytes)
 * @param[in]  rhs_step_y                        rhs_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  rhs_offset_first_element_in_bytes The offset of the first element in the RHS matrix
 * @param[out] dst_ptr                           Pointer to the destination matrix. Supported data type: S32
 * @param[in]  dst_stride_x                      Stride of the destination matrix in X dimension (in bytes)
 * @param[in]  dst_step_x                        dst_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  dst_stride_y                      Stride of the destination matrix in Y dimension (in bytes)
 * @param[in]  dst_step_y                        dst_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
 * @param[in]  lhs_stride_z                      Stride of the LHS matrix in Z dimension (in bytes)
 * @param[in]  rhs_stride_z                      Stride of the RHS matrix in Z dimension (in bytes)
 * @param[in]  dst_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
 * @param[in]  lhs_cross_plane_pad               (Optional) Bottom paddings for LHS matrix in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
 * @param[in]  dst_cross_plane_pad               (Optional) Bottom paddings for the output matrix in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
 */
1007__kernel void gemmlowp_mm_native(IMAGE_DECLARATION(lhs),
1008 IMAGE_DECLARATION(rhs),
1009 IMAGE_DECLARATION(dst),
1010 uint lhs_stride_z,
1011 uint rhs_stride_z,
1012 uint dst_stride_z
1013#if defined(REINTERPRET_INPUT_AS_3D)
1014 ,
1015 uint lhs_cross_plane_pad
1016#endif // REINTERPRET_INPUT_AS_3D
1017#if defined(REINTERPRET_OUTPUT_AS_3D)
1018 ,
1019 uint dst_cross_plane_pad
1020#endif // REINTERPRET_OUTPUT_AS_3D
1021 )
1022{
1023 uint x = get_global_id(0);
1024 uint y = get_global_id(1);
1025 uint z = get_global_id(2);
1026
1027#if defined(DUMMY_WORK_ITEMS)
1028 if((x * N0 >= N) || (y * M0 >= M))
1029 {
1030 return;
1031 }
1032#endif // defined(DUMMY_WORK_ITEMS)
1033
1034 // Compute LHS matrix address
morgolockcf343e32020-10-12 14:00:43 +01001035 uint lhs_offset = lhs_offset_first_element_in_bytes + COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * (uint)lhs_stride_y;
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001036
1037 // Compute RHS matrix address
morgolockcf343e32020-10-12 14:00:43 +01001038 uint rhs_offset = rhs_offset_first_element_in_bytes + x * N0 * sizeof(DATA_TYPE);
1039
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001040#if defined(MATRIX_B_DEPTH)
    // Do not slide matrix B if matrix B has only 3 dimensions and matrix A has more than 3
1042 rhs_offset += (z % MATRIX_B_DEPTH) * rhs_stride_z;
1043#else // defined(MATRIX_B_DEPTH)
1044 rhs_offset += z * rhs_stride_z;
1045#endif // defined(MATRIX_B_DEPTH)
1046
1047 REPEAT_VAR_INIT_TO_CONST(8, uint, zlhs, 0);
1048 REPEAT_VAR_INIT_TO_CONST(16, uint, zrhs, 0);
1049
1050#if defined(REINTERPRET_INPUT_AS_3D)
    // The plane (zlhs) is calculated by dividing M (y * M0) by HEIGHT_GEMM3D
Gian Marco Iodice9ae06d42020-10-22 16:37:12 +01001052 CALCULATE_Z_OFFSET(M0, uint, zlhs, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, lhs_cross_plane_pad, lhs_stride_y);
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001053
1054 // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
1055 // multiply lhs_stride_z by DEPTH_GEMM3D
1056 lhs_offset += z * lhs_stride_z * DEPTH_GEMM3D;
1057
1058#else // defined(REINTERPRET_INPUT_AS_3D)
1059
1060 // Add offset for batched GEMM
1061 lhs_offset += z * lhs_stride_z;
1062
1063#endif // defined(REINTERPRET_INPUT_AS_3D)
1064
1065 // Initialize the accumulators
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001066 REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(ACC_DATA_TYPE, N0), c, 0); //VEC_DATA_TYPE(ACC_DATA_TYPE, N0) c0=0,c1=0,c2=0,... c(M0-1)=0;
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001067
1068 int i = 0;
1069
1070 for(; i <= (K - K0); i += K0)
1071 {
1072 // Load values from LHS matrix
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001073 LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs);
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001074
1075 // Load values from RHS matrix
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001076 LOAD_BLOCK(K0, N0, DATA_TYPE, b, rhs_ptr, rhs_offset, rhs_stride_y, zrhs);
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001077
SiCong Li738893e2020-05-01 12:55:16 +01001078 // Partial matrix multiplication M0,N0,K0
1079#if(GPU_ARCH == GPU_ARCH_MIDGARD)
1080 ARM_MM_NATIVE_N0XK0XM0(VEC_DATA_TYPE(ACC_DATA_TYPE, N0), M0, K0, a, b, c);
1081#else // GPU_ARCH == GPU_ARCH_MIDGARD
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001082 // Transpose the values from RHS matrix
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001083 TRANSPOSE_K0XN0(K0, N0, b_t, b, DATA_TYPE);
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001084
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001085 ARM_MM_K0XN0XM0(M0, N0, K0, a, b_t, c);
SiCong Li738893e2020-05-01 12:55:16 +01001086#endif // GPU_ARCH == GPU_ARCH_MIDGARD
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001087
1088 // Update the offset
1089 lhs_offset += K0;
1090 rhs_offset += K0 * rhs_stride_y;
1091 }
1092
1093 // Left-over for loop
1094 for(; i < K; ++i)
1095 {
1096 // Load values from LHS matrix
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001097 LOAD_BLOCK(M0, 1, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs);
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001098
1099 // Load values from RHS matrix
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001100 LOAD_BLOCK(1, N0, DATA_TYPE, b, rhs_ptr, rhs_offset, rhs_stride_y, zrhs);
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001101
SiCong Li738893e2020-05-01 12:55:16 +01001102 // Partial matrix multiplication M0,N0,1
1103#if(GPU_ARCH == GPU_ARCH_MIDGARD)
1104 ARM_MM_NATIVE_N0XK0XM0(VEC_DATA_TYPE(ACC_DATA_TYPE, N0), M0, 1, a, b, c);
1105#else // GPU_ARCH == GPU_ARCH_MIDGARD
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001106 // Transpose the values from RHS matrix
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001107 TRANSPOSE_K0XN0(1, N0, b_t, b, DATA_TYPE);
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001108
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001109 ARM_MM_K0XN0XM0(M0, N0, 1, a, b_t, c);
SiCong Li738893e2020-05-01 12:55:16 +01001110#endif // GPU_ARCH == GPU_ARCH_MIDGARD
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001111
1112 // Update the offset
1113 lhs_offset += 1;
1114 rhs_offset += rhs_stride_y;
1115 }
1116
morgolockcf343e32020-10-12 14:00:43 +01001117 __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(int)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * dst_stride_y);
1118
    REPEAT_VAR_INIT_TO_CONST(M0, uint, zout, 0); //uint zout0=0,zout1=0,zout2=0,... zout(M0-1)=0;
1120
1121#if defined(REINTERPRET_OUTPUT_AS_3D)
    // The plane (zout) is calculated by dividing M (y * M0) by HEIGHT_GEMM3D
Gian Marco Iodice9ae06d42020-10-22 16:37:12 +01001123 CALCULATE_Z_OFFSET(M0, uint, zout, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y);
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001124
1125 // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
1126 // multiply dst_stride_z by DEPTH_GEMM3D
1127 dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
1128
1129#else // defined(REINTERPRET_OUTPUT_AS_3D)
1130
1131 // Add offset for batched GEMM
1132 dst_addr += z * dst_stride_z;
1133
1134#endif // defined(REINTERPRET_OUTPUT_AS_3D)
morgolockcf343e32020-10-12 14:00:43 +01001135 const bool cond_y = y == 0;
1136 const bool cond_x = ((x + 1) * N0 >= N);
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001137
Michele Di Giorgio27d92fd2020-10-27 12:44:17 +00001138 // Convert and store output block
1139 REPEAT_VAR_INIT_CONVERT(M0, VEC_DATA_TYPE(int, N0), c, res); // resN = CONVERT(cN, VEC_DATA_TYPE(int, N0));
1140 STORE_BLOCK_BOUNDARY_AWARE(M0, N0, int, res, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001141}
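
// Illustrative only (not part of the kernel): one possible host-side build-options string that
// satisfies the compile-time requirements documented above for gemmlowp_mm_native. The sizes
// below (M = 61, N = 35, K = 64, M0 = N0 = K0 = 4) are hypothetical, and the example assumes the
// usual convention that PARTIAL_STORE_M0/PARTIAL_STORE_N0 are the leftover block sizes
// (M % M0 = 1 and N % N0 = 3 for these values):
//
//   "-DDATA_TYPE=uchar -DACC_DATA_TYPE=uint -DM=61 -DN=35 -DK=64 "
//   "-DM0=4 -DN0=4 -DK0=4 -DPARTIAL_STORE_M0=1 -DPARTIAL_STORE_N0=3"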
SiCong Lied5fb392020-10-20 18:07:27 +01001142#endif // defined(M0) && defined(N0) && defined(K0) && defined(K) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001143
Gian Marco05288a22017-11-21 10:57:50 +00001144#if defined(COLS_A)
/** OpenCL kernel used to compute the vector of sums of all the entries in each row of Matrix A.
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001146 * It is also possible to multiply each reduced row by a scalar value, if SCALAR is passed at compile time.
Gian Marco05288a22017-11-21 10:57:50 +00001147 *
1148 * @note This stage is needed to handle the offset of matrix product
1149 * https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
1150 *
1151 * @attention The number of matrix A columns needs to be passed at compile time using -DCOLS_A
Manuel Bottini959c26d2019-12-02 16:22:35 +00001152 * @note The input data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=uchar)
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001153 * @note The data type for the accumulation must be passed at compile time using -DACC_DATA_TYPE (i.e. -DACC_DATA_TYPE=uint)
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001154 * @note In case of scaling the scalar value must be passed at compile time using -DSCALAR (e.g. -DSCALAR=3)
Gian Marco05288a22017-11-21 10:57:50 +00001155 *
Michele Di Giorgiof6f78762020-07-06 11:27:21 +01001156 * @param[in] src_ptr Pointer to the source tensor. Supported data type: QASYMM8/QASYMM8_SIGNED/QSYMM8
Gian Marco05288a22017-11-21 10:57:50 +00001157 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
1158 * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
1159 * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
1160 * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
1161 * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
1162 * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
1163 * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
1164 * @param[out] dst_ptr Pointer to the destination tensor Supported data type: S32
1165 * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
1166 * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
1167 * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
1168 * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
1169 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
1170 */
1171__kernel void gemmlowp_matrix_a_reduction(TENSOR3D_DECLARATION(src),
1172 IMAGE_DECLARATION(dst))
1173{
1174 // Compute source and destination addresses
1175 Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
1176 Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
1177
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001178 VEC_DATA_TYPE(ACC_DATA_TYPE, 4)
1179 sum_row_32 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))0;
1180 ACC_DATA_TYPE sum_row = 0;
Gian Marco05288a22017-11-21 10:57:50 +00001181
Manuel Bottini959c26d2019-12-02 16:22:35 +00001182 __global const DATA_TYPE *matrix_a = (__global const DATA_TYPE *)(src.ptr + get_global_id(0) * src_stride_y + get_global_id(1) * src_stride_z);
Gian Marco05288a22017-11-21 10:57:50 +00001183
1184 int i = 0;
1185
1186 // This for loop performs 16 accumulations
1187 for(; i <= ((int)COLS_A - 16); i += 16)
1188 {
Manuel Bottini959c26d2019-12-02 16:22:35 +00001189 const VEC_DATA_TYPE(DATA_TYPE, 16) a0 = vload16(0, matrix_a + i);
Gian Marco05288a22017-11-21 10:57:50 +00001190
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001191 sum_row_32 += CONVERT(a0.s0123, VEC_DATA_TYPE(ACC_DATA_TYPE, 4)) + CONVERT(a0.s4567, VEC_DATA_TYPE(ACC_DATA_TYPE, 4)) + CONVERT(a0.s89AB, VEC_DATA_TYPE(ACC_DATA_TYPE, 4)) + CONVERT(a0.sCDEF,
1192 VEC_DATA_TYPE(ACC_DATA_TYPE, 4));
Gian Marco05288a22017-11-21 10:57:50 +00001193 }
1194
1195 // This for loop performs the leftover accumulations
1196 for(; i < COLS_A; ++i)
1197 {
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001198 sum_row += (ACC_DATA_TYPE)matrix_a[i];
Gian Marco05288a22017-11-21 10:57:50 +00001199 }
1200
Manuel Bottini959c26d2019-12-02 16:22:35 +00001201 sum_row += sum_row_32.s0 + sum_row_32.s1 + sum_row_32.s2 + sum_row_32.s3;
Gian Marco05288a22017-11-21 10:57:50 +00001202
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001203#if defined(SCALAR)
1204 sum_row *= (int)SCALAR;
1205#endif // defined(SCALAR)
Gian Marco05288a22017-11-21 10:57:50 +00001206 *((__global int *)dst.ptr) = (int)sum_row;
1207}
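
// Reference semantics of the kernel above (a scalar sketch, not compiled code): the work-item at
// (x0, x1) = (get_global_id(0), get_global_id(1)) reduces row x0 of batch x1 of Matrix A, i.e.
//
//   dst[x1][x0] = SCALAR * (src[x1][x0][0] + src[x1][x0][1] + ... + src[x1][x0][COLS_A - 1])
//
// with the SCALAR factor applied only when it is defined at compile time.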
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001208
1209#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
/** OpenCL kernel used to compute the vector of sums of all the entries in each row of Matrix A using the arm dot product instruction.
1211 * It is also possible to multiply each reduced row by a scalar value, if SCALAR is passed at compile time.
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001212 *
1213 * @note This stage is needed to handle the offset of matrix product
1214 * https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
1215 *
1216 * @attention The number of matrix A columns needs to be passed at compile time using -DCOLS_A
Manuel Bottini959c26d2019-12-02 16:22:35 +00001217 * @note The input data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=uchar)
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001218 * @note The data type for the accumulation must be passed at compile time using -DACC_DATA_TYPE (i.e. -DACC_DATA_TYPE=uint)
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001219 * @note In case of scaling the scalar value must be passed at compile time using -DSCALAR (e.g. -DSCALAR=3)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001220 *
Michele Di Giorgiof6f78762020-07-06 11:27:21 +01001221 * @param[in] src_ptr Pointer to the source tensor. Supported data type: QASYMM8/QASYMM8_SIGNED/QSYMM8
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001222 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
1223 * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
1224 * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
1225 * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
1226 * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
1227 * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
1228 * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
1229 * @param[out] dst_ptr Pointer to the destination tensor Supported data type: S32
1230 * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
1231 * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
1232 * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
1233 * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
1234 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
1235 */
1236__kernel void gemmlowp_matrix_a_reduction_dot8(TENSOR3D_DECLARATION(src),
1237 IMAGE_DECLARATION(dst))
1238{
1239 // Compute source and destination addresses
1240 Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
1241 Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
1242
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001243 ACC_DATA_TYPE sum_row = 0;
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001244
Manuel Bottini959c26d2019-12-02 16:22:35 +00001245 __global const DATA_TYPE *matrix_a = (__global const DATA_TYPE *)(src.ptr + get_global_id(0) * src_stride_y + get_global_id(1) * src_stride_z);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001246
1247 int i = 0;
1248
    // This for loop performs 32 accumulations per iteration
1250 for(; i <= ((int)COLS_A - 32); i += 32)
1251 {
Manuel Bottini959c26d2019-12-02 16:22:35 +00001252 VEC_DATA_TYPE(DATA_TYPE, 16)
1253 a0 = vload16(0, matrix_a + i);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001254
Manuel Bottini959c26d2019-12-02 16:22:35 +00001255 sum_row += arm_dot(a0.s0123, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
1256 sum_row += arm_dot(a0.s4567, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
1257 sum_row += arm_dot(a0.s89AB, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
1258 sum_row += arm_dot(a0.sCDEF, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001259
Manuel Bottini959c26d2019-12-02 16:22:35 +00001260 a0 = vload16(1, matrix_a + i);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001261
Manuel Bottini959c26d2019-12-02 16:22:35 +00001262 sum_row += arm_dot(a0.s0123, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
1263 sum_row += arm_dot(a0.s4567, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
1264 sum_row += arm_dot(a0.s89AB, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
1265 sum_row += arm_dot(a0.sCDEF, (VEC_DATA_TYPE(DATA_TYPE, 4))(1));
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001266 }
1267
1268 // This for loop performs the leftover accumulations
1269 for(; i < COLS_A; ++i)
1270 {
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001271 sum_row += (ACC_DATA_TYPE)matrix_a[i];
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001272 }
1273
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001274#if defined(SCALAR)
1275 sum_row *= (int)SCALAR;
1276#endif // defined(SCALAR)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001277 *((__global int *)dst.ptr) = (int)sum_row;
1278}
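
// Note on the dot-product trick used above (illustrative arithmetic): dotting a 4-element vector
// with a vector of ones reduces it to a scalar sum, e.g.
//
//   arm_dot((a0, a1, a2, a3), (1, 1, 1, 1)) = a0 + a1 + a2 + a3
//
// so the eight arm_dot calls per iteration accumulate 32 row entries into sum_row.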
1279#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
Gian Marco05288a22017-11-21 10:57:50 +00001280#endif // defined(COLS_A)
1281
Michele Di Giorgioaae34102020-10-19 15:31:45 +01001282#if defined(COLS_B) && defined(ROWS_B) && defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER)
/** OpenCL kernel used to compute the row vector of sums of all the entries in each column of Matrix B.
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001284 * It is also possible to multiply each reduced column by a scalar value, if SCALAR is passed at compile time.
Gian Marco05288a22017-11-21 10:57:50 +00001285 *
1286 * @note This stage is needed to handle the offset of matrix product
1287 * https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
1288 *
1289 * @attention The number of matrix B columns and rows needs to be passed at compile time using -DCOLS_B and -DROWS_B
Manuel Bottini959c26d2019-12-02 16:22:35 +00001290 * @note The input data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=uchar)
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001291 * @note The data type for the accumulation must be passed at compile time using -DACC_DATA_TYPE (i.e. -DACC_DATA_TYPE=uint)
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001292 * @note In case of scaling the scalar value must be passed at compile time using -DSCALAR (i.e. -DSCALAR=3)
Michele Di Giorgioaae34102020-10-19 15:31:45 +01001293 * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
1294 * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
Gian Marco05288a22017-11-21 10:57:50 +00001295 *
Michele Di Giorgiof6f78762020-07-06 11:27:21 +01001296 * @param[in] src_ptr Pointer to the source tensor. Supported data type: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL
Gian Marco05288a22017-11-21 10:57:50 +00001297 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
1298 * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
1299 * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
1300 * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
1301 * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
1302 * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
1303 * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
1304 * @param[out] dst_ptr Pointer to the destination tensor Supported data type: S32
1305 * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
1306 * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
1307 * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
1308 * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
1309 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
1310 */
1311__kernel void gemmlowp_matrix_b_reduction(TENSOR3D_DECLARATION(src),
1312 IMAGE_DECLARATION(dst))
1313{
1314 // Compute source and destination addresses
Michele Di Giorgioaae34102020-10-19 15:31:45 +01001315 const uint x_offs = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
1316 const uint y = get_global_id(1);
Gian Marco05288a22017-11-21 10:57:50 +00001317
Michele Di Giorgioaae34102020-10-19 15:31:45 +01001318 __global const DATA_TYPE *matrix_b = (__global const DATA_TYPE *)(src_ptr + src_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) + y * src_step_y + y * src_stride_z);
1319 __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offs * sizeof(int) + y * dst_stride_y;
Gian Marco05288a22017-11-21 10:57:50 +00001320
Michele Di Giorgioaae34102020-10-19 15:31:45 +01001321 VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)
Michele Di Giorgioed902bc2020-10-22 12:05:09 +01001322 sum_col_32 = (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))0;
Gian Marco05288a22017-11-21 10:57:50 +00001323
1324 int i = 0;
1325 // This for loop performs 4 accumulations
1326 for(; i <= ((int)ROWS_B - 4); i += 4)
1327 {
Michele Di Giorgioaae34102020-10-19 15:31:45 +01001328 const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
1329 b0 = VLOAD(VEC_SIZE)(0, matrix_b + 0 * src_stride_y);
1330 const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
1331 b1 = VLOAD(VEC_SIZE)(0, matrix_b + 1 * src_stride_y);
1332 const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
1333 b2 = VLOAD(VEC_SIZE)(0, matrix_b + 2 * src_stride_y);
1334 const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
1335 b3 = VLOAD(VEC_SIZE)(0, matrix_b + 3 * src_stride_y);
Gian Marco05288a22017-11-21 10:57:50 +00001336
Michele Di Giorgioed902bc2020-10-22 12:05:09 +01001337 sum_col_32 += CONVERT(b0, VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)) + CONVERT(b1, VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)) + CONVERT(b2, VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)) + CONVERT(b3,
1338 VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE));
Gian Marco05288a22017-11-21 10:57:50 +00001339
1340 matrix_b += 4 * src_stride_y;
1341 }
1342
    // This for loop performs the leftover accumulations
1344 for(; i < (int)ROWS_B; ++i)
1345 {
Michele Di Giorgioaae34102020-10-19 15:31:45 +01001346 const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
1347 b0 = VLOAD(VEC_SIZE)(0, matrix_b);
Gian Marco05288a22017-11-21 10:57:50 +00001348
Michele Di Giorgioed902bc2020-10-22 12:05:09 +01001349 sum_col_32 += CONVERT(b0, VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE));
Gian Marco05288a22017-11-21 10:57:50 +00001350
1351 matrix_b += src_stride_y;
1352 }
1353
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001354#if defined(SCALAR)
Michele Di Giorgioed902bc2020-10-22 12:05:09 +01001355 sum_col_32 *= (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))SCALAR;
Michele Di Giorgiof64d3362020-04-03 12:40:10 +01001356#endif // defined(SCALAR)
Michele Di Giorgioed902bc2020-10-22 12:05:09 +01001357 VEC_DATA_TYPE(int, VEC_SIZE)
1358 res0 = CONVERT(sum_col_32, VEC_DATA_TYPE(int, VEC_SIZE));
1359
1360 STORE_VECTOR_SELECT(res, int, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
Gian Marco05288a22017-11-21 10:57:50 +00001361}
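
// Reference semantics of the kernel above (a sketch): each work-item reduces VEC_SIZE columns of
// Matrix B at once; for every column c it covers,
//
//   dst[y][c] = SCALAR * (B[y][0][c] + B[y][1][c] + ... + B[y][ROWS_B - 1][c])
//
// The x_offs clamp at the top shifts every work-item after the first back by
// (VEC_SIZE - VEC_SIZE_LEFTOVER) so that its vector accesses stay in bounds, while the first
// work-item stores only VEC_SIZE_LEFTOVER values (via STORE_VECTOR_SELECT) when the row width is
// not a multiple of VEC_SIZE.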
Michele Di Giorgioaae34102020-10-19 15:31:45 +01001362#endif // defined(COLS_B) && defined(ROWS_B) && defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER)
Gian Marco05288a22017-11-21 10:57:50 +00001363
Michele Di Giorgioe7b333e2020-01-15 10:30:51 +00001364#endif // defined(DATA_TYPE) && defined(ACC_DATA_TYPE)
1365
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001366#if defined(K_OFFSET) && defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER)
1367
1368#define VEC_INT VEC_DATA_TYPE(int, VEC_SIZE)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001369
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001370/* Helper function used to calculate the offset contribution after matrix multiplication.
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001371 *
 * This function takes a final int32 accumulator value (the output of matrix multiplication),
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001373 * and calculates the offset contribution of matrix A and matrix B.
1374 *
1375 * @attention The k_offset = a_offset * b_offset * k (where k is the number of matrix A columns) needs to be passed at compile time using -DK_OFFSET (i.e. -DK_OFFSET=1200)
1376 * @note In case the offset contribution due to a_offset is required, a_offset needs to be passed at compile time using -DA_OFFSET (i.e. -DA_OFFSET=1)
1377 * @note In case the offset contribution due to b_offset is required, b_offset needs to be passed at compile time using -DB_OFFSET (i.e. -DB_OFFSET=6)
1378 * @note In case sum_col has batches, -DSUM_COL_HAS_BATCHES must be passed at compile time. Usually if gemmlowp is used to accelerate convolution layer, sum_col will not have batches
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001379 * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
1380 * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001381 *
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001382 * @param[in] x max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001383 * @param[in] y get_global_id(1)
1384 * @param[in] z get_global_id(2)
1385 * @param[in] sum_col_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1386 * @param[in] sum_col_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1387 * @param[in] sum_col_step_x (Optional) sum_col_stride_x * number of elements along X processed per workitem(in bytes)
1388 * @param[in] sum_col_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1389 * @param[in] sum_col_step_y (Optional) sum_col_stride_y * number of elements along Y processed per workitem(in bytes)
1390 * @param[in] sum_col_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
1391 * @param[in] sum_row_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1392 * @param[in] sum_row_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1393 * @param[in] sum_row_step_x (Optional) sum_row_stride_x * number of elements along X processed per workitem(in bytes)
1394 * @param[in] sum_row_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1395 * @param[in] sum_row_step_y (Optional) sum_row_stride_y * number of elements along Y processed per workitem(in bytes)
1396 * @param[in] sum_row_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
 * @param[in] biases_ptr                              (Optional) Pointer to the biases tensor. Supported data type: S32
1398 * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
1399 * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
1400 * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
1401 */
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001402inline VEC_INT offset_contribution(
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001403 int x,
1404 int y,
1405 int z
1406#if defined(A_OFFSET)
1407 ,
1408 IMAGE_DECLARATION(sum_col)
1409#endif // defined(A_OFFSET)
1410#if defined(B_OFFSET)
1411 ,
1412 IMAGE_DECLARATION(sum_row)
1413#endif // defined(B_OFFSET)
1414#if defined(ADD_BIAS)
1415 ,
1416 VECTOR_DECLARATION(biases)
1417#endif // defined(ADD_BIAS)
1418)
1419{
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001420 VEC_INT a_offset_s32 = (VEC_INT)0;
1421 VEC_INT b_offset_s32 = (VEC_INT)0;
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001422
1423 int batch_id = z;
1424#if defined(DEPTH_INPUT3D)
1425 batch_id /= (int)DEPTH_INPUT3D;
1426#endif // defined(DEPTH_INPUT3D)
1427
1428#if defined(A_OFFSET)
    // Compute the address of the column sums (sum_col)
1430 __global uchar *sum_col_addr = sum_col_ptr + sum_col_offset_first_element_in_bytes + x * sizeof(int);
1431
1432 // Compute the offset contribution due to A_OFFSET
1433#if defined(SUM_COL_HAS_BATCHES)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001434 a_offset_s32 = VLOAD(VEC_SIZE)(0, (__global int *)(sum_col_addr + batch_id * sum_col_stride_y));
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001435#else // defined(SUM_COL_HAS_BATCHES)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001436 a_offset_s32 = VLOAD(VEC_SIZE)(0, (__global int *)sum_col_addr);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001437#endif // defined(SUM_COL_HAS_BATCHES)
1438
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001439 a_offset_s32 *= (VEC_INT)A_OFFSET;
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001440#endif // defined(A_OFFSET)
1441
1442#if defined(B_OFFSET)
    // Compute the address of the row sums (sum_row)
1444 __global uchar *sum_row_addr = sum_row_ptr + sum_row_offset_first_element_in_bytes + y * sizeof(int);
1445
1446 // Compute the offset contribution due to B_OFFSET
1447#if defined(HEIGHT_INPUT3D) && defined(DEPTH_INPUT3D)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001448 b_offset_s32 = (VEC_INT) * (((__global int *)(sum_row_addr + batch_id * sum_row_stride_y)) + (z % (int)DEPTH_INPUT3D) * (int)HEIGHT_INPUT3D);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001449#else // defined(HEIGHT_INPUT3D) && defined(DEPTH_INPUT3D)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001450 b_offset_s32 = (VEC_INT) * (((__global int *)(sum_row_addr + batch_id * sum_row_stride_y)));
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001451#endif // defined(HEIGHT_INPUT3D) && defined(DEPTH_INPUT3D)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001452 b_offset_s32 *= (VEC_INT)B_OFFSET;
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001453#endif // defined(B_OFFSET)
1454
1455#if defined(ADD_BIAS)
1456 // Add bias
1457 __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(int);
1458
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001459 VEC_INT biases_values = VLOAD(VEC_SIZE)(0, (__global int *)bias_addr);
1460 b_offset_s32 += (VEC_INT)biases_values;
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001461#endif // defined(ADD_BIAS)
1462
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001463 return (VEC_INT)K_OFFSET + a_offset_s32 + b_offset_s32;
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001464}
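
// Worked example of the term returned above, reusing the documentation's example values
// (-DK_OFFSET=1200, -DA_OFFSET=1, -DB_OFFSET=6, i.e. 1200 = 1 * 6 * 200 for K = 200 columns):
// for an output element at row i and column k,
//
//   offset_term = K_OFFSET + sum_col[k] * A_OFFSET + sum_row[i] * B_OFFSET
//               = 1200     + sum_col[k] * 1        + sum_row[i] * 6
//
// and, when ADD_BIAS is defined, the bias value for column k is added to the same term.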
1465
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001466/* OpenCL kernel used to add the offset contribution after matrix multiplication. The computation is performed in-place
Gian Marco05288a22017-11-21 10:57:50 +00001467 *
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001468 * This kernel takes a final int32 accumulator value (the output of matrix multiplication),
Gian Marco05288a22017-11-21 10:57:50 +00001469 * and adds to it the offset contribution of matrix A and matrix B in-place.
1470 *
1471 * @attention The k_offset = a_offset * b_offset * k (where k is the number of matrix A columns) needs to be passed at compile time using -DK_OFFSET (i.e. -DK_OFFSET=1200)
1472 * @note In case the offset contribution due to a_offset is required, a_offset needs to be passed at compile time using -DA_OFFSET (i.e. -DA_OFFSET=1)
1473 * @note In case the offset contribution due to b_offset is required, b_offset needs to be passed at compile time using -DB_OFFSET (i.e. -DB_OFFSET=6)
Chunosov5124be52017-11-22 20:42:13 +07001474 * @note In case sum_col has batches, -DSUM_COL_HAS_BATCHES must be passed at compile time. Usually if gemmlowp is used to accelerate convolution layer, sum_col will not have batches
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001475 * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
1476 * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
Gian Marco05288a22017-11-21 10:57:50 +00001477 *
1478 * The final result is:
1479 *
1480 * mm_result[i][k] = mm_result[i][k] +
1481 * (sum_col[k] * A_OFFSET) +
1482 * (sum_row[i] * B_OFFSET) +
1483 * (K_OFFSET)
1484 *
Georgios Pinitasebf6b8a2018-09-24 16:31:08 +01001485 * @param[in] mm_result_ptr Pointer to the source tensor. Supported data type: S32
1486 * @param[in] mm_result_stride_x Stride of the source tensor in X dimension (in bytes)
1487 * @param[in] mm_result_step_x mm_result_stride_x * number of elements along X processed per workitem(in bytes)
1488 * @param[in] mm_result_stride_y Stride of the source tensor in Y dimension (in bytes)
1489 * @param[in] mm_result_step_y mm_result_stride_y * number of elements along Y processed per workitem(in bytes)
1490 * @param[in] mm_result_stride_z Stride of the source tensor in Z dimension (in bytes)
1491 * @param[in] mm_result_step_z mm_result_stride_z * number of elements along Z processed per workitem(in bytes)
1492 * @param[in] mm_result_offset_first_element_in_bytes The offset of the first element in the source tensor
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001493 * @param[in] sum_col_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1494 * @param[in] sum_col_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1495 * @param[in] sum_col_step_x (Optional) sum_col_stride_x * number of elements along X processed per workitem(in bytes)
1496 * @param[in] sum_col_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1497 * @param[in] sum_col_step_y (Optional) sum_col_stride_y * number of elements along Y processed per workitem(in bytes)
1498 * @param[in] sum_col_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
1499 * @param[in] sum_row_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1500 * @param[in] sum_row_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1501 * @param[in] sum_row_step_x (Optional) sum_row_stride_x * number of elements along X processed per workitem(in bytes)
1502 * @param[in] sum_row_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1503 * @param[in] sum_row_step_y (Optional) sum_row_stride_y * number of elements along Y processed per workitem(in bytes)
1504 * @param[in] sum_row_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
 * @param[in] biases_ptr                              (Optional) Pointer to the biases tensor. Supported data type: same as @p mm_result_ptr
1506 * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
1507 * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
1508 * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
Gian Marco05288a22017-11-21 10:57:50 +00001509 */
1510__kernel void gemmlowp_offset_contribution(TENSOR3D_DECLARATION(mm_result)
1511#if defined(A_OFFSET)
1512 ,
1513 IMAGE_DECLARATION(sum_col)
1514#endif // defined(A_OFFSET)
1515#if defined(B_OFFSET)
1516 ,
1517 IMAGE_DECLARATION(sum_row)
1518#endif // defined(B_OFFSET)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001519#if defined(ADD_BIAS)
1520 ,
1521 VECTOR_DECLARATION(biases)
#endif // defined(ADD_BIAS)
Gian Marco05288a22017-11-21 10:57:50 +00001523 )
1524{
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001525 const int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
Georgios Pinitasebf6b8a2018-09-24 16:31:08 +01001526 const int y = get_global_id(1);
1527 const int z = get_global_id(2);
1528
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001529 // Compute offset contribution
Michele Di Giorgio410bca42020-10-22 11:07:33 +01001530 VEC_INT offset_term_s32 = offset_contribution(
1531 x, y, z
Gian Marco05288a22017-11-21 10:57:50 +00001532#if defined(A_OFFSET)
Michele Di Giorgio410bca42020-10-22 11:07:33 +01001533 ,
1534 sum_col_ptr,
1535 sum_col_stride_x,
1536 sum_col_step_x,
1537 sum_col_stride_y,
1538 sum_col_step_y,
1539 sum_col_offset_first_element_in_bytes
Gian Marco05288a22017-11-21 10:57:50 +00001540#endif // defined(A_OFFSET)
Gian Marco05288a22017-11-21 10:57:50 +00001541#if defined(B_OFFSET)
Michele Di Giorgio410bca42020-10-22 11:07:33 +01001542 ,
1543 sum_row_ptr,
1544 sum_row_stride_x,
1545 sum_row_step_x,
1546 sum_row_stride_y,
1547 sum_row_step_y,
1548 sum_row_offset_first_element_in_bytes
Gian Marco05288a22017-11-21 10:57:50 +00001549#endif // defined(B_OFFSET)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001550#if defined(ADD_BIAS)
Michele Di Giorgio410bca42020-10-22 11:07:33 +01001551 ,
1552 biases_ptr,
1553 biases_stride_x,
1554 biases_step_x,
1555 biases_offset_first_element_in_bytes
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001556#endif // defined(ADD_BIAS)
Michele Di Giorgio410bca42020-10-22 11:07:33 +01001557 );
Gian Marco05288a22017-11-21 10:57:50 +00001558
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001559 __global uchar *mm_result_addr = mm_result_ptr + mm_result_offset_first_element_in_bytes + x * sizeof(int) + y * mm_result_stride_y + z * mm_result_stride_z;
Gian Marco05288a22017-11-21 10:57:50 +00001560
Michele Di Giorgio410bca42020-10-22 11:07:33 +01001561 VEC_INT in_s32_0 = VLOAD(VEC_SIZE)(0, (__global int *)mm_result_addr);
Gian Marco05288a22017-11-21 10:57:50 +00001562
1563 // Add the offset terms to GEMM's result
Michele Di Giorgio410bca42020-10-22 11:07:33 +01001564 in_s32_0 += offset_term_s32;
Gian Marco05288a22017-11-21 10:57:50 +00001565
1566 // Store the result with the offset contribution
Michele Di Giorgio410bca42020-10-22 11:07:33 +01001567 STORE_VECTOR_SELECT(in_s32_, int, mm_result_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
Gian Marco05288a22017-11-21 10:57:50 +00001568}
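
// Illustrative only: one possible set of build options for gemmlowp_offset_contribution, reusing
// the example values given in the documentation above (the vector sizes are hypothetical):
//
//   "-DK_OFFSET=1200 -DA_OFFSET=1 -DB_OFFSET=6 -DADD_BIAS -DVEC_SIZE=16 -DVEC_SIZE_LEFTOVER=3"
//
// A_OFFSET, B_OFFSET and ADD_BIAS are optional; omitting them removes the corresponding tensor
// arguments from the kernel signature, as the #if blocks above show.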
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001569
Michele Di Giorgiob54ba282020-01-14 15:31:55 +00001570#if defined(RESULT_OFFSET) && defined(RESULT_MULTIPLIER) && defined(RESULT_SHIFT) && defined(OUTPUT_DATA_TYPE)
/* OpenCL kernel used to add the offset contribution after @ref CLGEMMLowpMatrixMultiplyKernel and quantize down to uint8/int8.
 *
 * This kernel takes a final int32 accumulator value (the output of @ref CLGEMMLowpMatrixMultiplyKernel), adds to it the offset contribution of matrix A and matrix B and quantizes to uint8/int8 through the output stage.
1574 *
1575 *
1576 * @attention The k_offset = a_offset * b_offset * k (where k is the number of matrix A columns) needs to be passed at compile time using -DK_OFFSET (i.e. -DK_OFFSET=1200)
1577 * @note In case the offset contribution due to a_offset is required, a_offset needs to be passed at compile time using -DA_OFFSET (i.e. -DA_OFFSET=1)
1578 * @note In case the offset contribution due to b_offset is required, b_offset needs to be passed at compile time using -DB_OFFSET (i.e. -DB_OFFSET=6)
1579 * @note In case sum_col has batches, -DSUM_COL_HAS_BATCHES must be passed at compile time. Usually if gemmlowp is used to accelerate convolution layer, sum_col will not have batches
1580 *
1581 * The result before the output stage is:
1582 *
1583 * mm_result[i][k] = mm_result[i][k] +
1584 * (sum_col[k] * A_OFFSET) +
1585 * (sum_row[i] * B_OFFSET) +
1586 * (K_OFFSET)
1587 *
Manuel Bottini959c26d2019-12-02 16:22:35 +00001588 * This result is quantized down to uint8/int8 using the output stage. The output stage computes the following operations:
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001589 *
1590 * -# Add offset terms to final result
1591 * -# Multiply each entry of result by result_mult_int
1592 * -# Add bias to final result (if -DADD_BIAS is passed at compile time)
1593 * -# Shift the int32 accumulator by result_shift
1594 * -# Clamp the value between the specified min and max bounds (if -DMIN_BOUND and/or -DMAX_BOUND are passed at compile time)
Manuel Bottini959c26d2019-12-02 16:22:35 +00001595 * -# Clamp the resulting int32 values:
1596 * - to the [0..255] range and cast to QASYMM8.
1597 * - to the [-128..127] range and cast to QASYMM8_SIGNED.
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001598 *
 * @attention The offset, scalar scale factor and number of bits to shift right of the output tensor must be passed at compile time using -DRESULT_OFFSET, -DRESULT_MULTIPLIER and -DRESULT_SHIFT
1600 *
1601 * @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
Manuel Bottini959c26d2019-12-02 16:22:35 +00001602 * @note The output datatype should be passed at compile time using -DOUTPUT_DATA_TYPE
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001603 * @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
1604 * These values can be used to implement "rectified linear unit" activation functions
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001605 * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
1606 * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001607 *
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001608 * @param[in] mm_result_ptr Pointer to the source tensor. Supported data type: S32
1609 * @param[in] mm_result_stride_x Stride of the source tensor in X dimension (in bytes)
1610 * @param[in] mm_result_step_x mm_result_stride_x * number of elements along X processed per workitem(in bytes)
1611 * @param[in] mm_result_stride_y Stride of the source tensor in Y dimension (in bytes)
1612 * @param[in] mm_result_step_y mm_result_stride_y * number of elements along Y processed per workitem(in bytes)
1613 * @param[in] mm_result_stride_z Stride of the source tensor in Z dimension (in bytes)
1614 * @param[in] mm_result_step_z mm_result_stride_z * number of elements along Z processed per workitem(in bytes)
1615 * @param[in] mm_result_offset_first_element_in_bytes The offset of the first element in the source tensor
1616 * @param[in] sum_col_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1617 * @param[in] sum_col_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1618 * @param[in] sum_col_step_x (Optional) sum_col_stride_x * number of elements along X processed per workitem(in bytes)
1619 * @param[in] sum_col_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1620 * @param[in] sum_col_step_y (Optional) sum_col_stride_y * number of elements along Y processed per workitem(in bytes)
1621 * @param[in] sum_col_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
1622 * @param[in] sum_row_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1623 * @param[in] sum_row_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1624 * @param[in] sum_row_step_x (Optional) sum_row_stride_x * number of elements along X processed per workitem(in bytes)
1625 * @param[in] sum_row_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1626 * @param[in] sum_row_step_y (Optional) sum_row_stride_y * number of elements along Y processed per workitem(in bytes)
1627 * @param[in] sum_row_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
 * @param[in]  biases_ptr                                             (Optional) Pointer to the biases tensor. Supported data type: same as @p mm_result_ptr
1629 * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
1630 * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
1631 * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
Manuel Bottini959c26d2019-12-02 16:22:35 +00001632 * @param[out] dst_ptr Pointer to the destination tensor Supported data type: QASYMM8/QASYMM8_SIGNED
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001633 * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
1634 * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
1635 * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
1636 * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
1637 * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
1638 * @param[in] dst_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
1639 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
1640 * @param[in] result_multipliers_ptr (Optional) Pointer to the output multipliers vector for per-channel quantization. Supported data types: S32
1641 * @param[in] result_multipliers_stride_x (Optional) Stride of the output multipliers vector in X dimension (in bytes)
1642 * @param[in] result_multipliers_step_x (Optional) output_multipliers_stride_x * number of elements along X processed per workitem(in bytes)
1643 * @param[in] result_multipliers_offset_first_element_in_bytes (Optional) The offset of the first element in the output multipliers vector
1644 * @param[in] result_shifts_ptr (Optional) Pointer to the output shifts vector for per-channel quantization. Supported data types: S32
1645 * @param[in] result_shifts_stride_x (Optional) Stride of the output shifts vector in X dimension (in bytes)
1646 * @param[in] result_shifts_step_x (Optional) output_shifts_stride_x * number of elements along X processed per workitem(in bytes)
1647 * @param[in] result_shifts_offset_first_element_in_bytes (Optional) The offset of the first element in the output shifts vector
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001648 */
1649__kernel void gemmlowp_offset_contribution_quantize_down(TENSOR3D_DECLARATION(mm_result)
1650#if defined(A_OFFSET)
1651 ,
1652 IMAGE_DECLARATION(sum_col)
1653#endif // defined(A_OFFSET)
1654#if defined(B_OFFSET)
1655 ,
1656 IMAGE_DECLARATION(sum_row)
1657#endif // defined(B_OFFSET)
1658 ,
1659#if defined(ADD_BIAS)
1660 VECTOR_DECLARATION(biases),
1661#endif // defined(ADD_BIAS)
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001662 TENSOR3D_DECLARATION(dst)
1663#if defined(PER_CHANNEL_QUANTIZATION)
1664 ,
1665 VECTOR_DECLARATION(result_multipliers),
1666 VECTOR_DECLARATION(result_shifts)
1667#endif // defined(PER_CHANNEL_QUANTIZATION)
1668 )
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001669{
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001670 const int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001671 const int y = get_global_id(1);
1672 const int z = get_global_id(2);
1673
1674 __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x + y * dst_stride_y + z * dst_stride_z;
1675
1676 // Compute offset contribution
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001677 VEC_INT offset_term_s32 = offset_contribution(
1678 x, y, z
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001679#if defined(A_OFFSET)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001680 ,
1681 sum_col_ptr,
1682 sum_col_stride_x,
1683 sum_col_step_x,
1684 sum_col_stride_y,
1685 sum_col_step_y,
1686 sum_col_offset_first_element_in_bytes
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001687#endif // defined(A_OFFSET)
1688#if defined(B_OFFSET)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001689 ,
1690 sum_row_ptr,
1691 sum_row_stride_x,
1692 sum_row_step_x,
1693 sum_row_stride_y,
1694 sum_row_step_y,
1695 sum_row_offset_first_element_in_bytes
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001696#endif // defined(B_OFFSET)
1697#if defined(ADD_BIAS)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001698 ,
1699 biases_ptr,
1700 biases_stride_x,
1701 biases_step_x,
1702 biases_offset_first_element_in_bytes
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001703#endif // defined(ADD_BIAS)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001704 );
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001705
1706 __global uchar *mm_result_addr = mm_result_ptr + mm_result_offset_first_element_in_bytes + x * sizeof(int) + y * mm_result_stride_y + z * mm_result_stride_z;
1707
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001708 VEC_INT in_s32 = VLOAD(VEC_SIZE)(0, (__global int *)mm_result_addr);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001709
1710 // Add the offset terms to GEMM's result
1711 in_s32 += offset_term_s32;
1712
1713 // -------------- OUTPUT STAGE
1714
1715 // Add the offset terms to GEMM's result
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001716 in_s32 += (VEC_INT)RESULT_OFFSET;
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001717
1718 // Multiply by result_mult_int and shift
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001719#if defined(PER_CHANNEL_QUANTIZATION)
1720 __global uchar *result_multipliers_addr = result_multipliers_ptr + result_multipliers_offset_first_element_in_bytes + x * sizeof(int);
1721 __global uchar *result_shifts_addr = result_shifts_ptr + result_shifts_offset_first_element_in_bytes + x * sizeof(int);
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001722 VEC_INT result_multipliers_values = VLOAD(VEC_SIZE)(0, (__global int *)result_multipliers_addr);
1723 VEC_INT result_shifts_values = VLOAD(VEC_SIZE)(0, (__global int *)result_shifts_addr);
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001724
1725 in_s32 *= result_multipliers_values;
1726 in_s32 >>= result_shifts_values;
1727#else // defined(PER_CHANNEL_QUANTIZATION)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001728 in_s32 *= RESULT_MULTIPLIER;
1729
1730 in_s32 >>= RESULT_SHIFT;
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001731#endif // defined(PER_CHANNEL_QUANTIZATION)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001732
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001733 VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE)
1734 res0 = CONVERT_SAT(in_s32, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE));
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001735
1736#if defined(MIN_BOUND)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001737 res0 = max(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MIN_BOUND);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001738#endif // defined(MIN_BOUND)
1739#if defined(MAX_BOUND)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001740 res0 = min(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MAX_BOUND);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001741#endif // defined(MAX_BOUND)
1742
1743 // Store the result
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001744 STORE_VECTOR_SELECT(res, OUTPUT_DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001745}
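
// Worked example of the output stage above (hypothetical values, non PER_CHANNEL_QUANTIZATION
// path): with -DRESULT_OFFSET=2, -DRESULT_MULTIPLIER=3 and -DRESULT_SHIFT=4, an accumulator of
// 100 (after the offset contribution has been added) becomes
//
//   ((100 + 2) * 3) >> 4 = 306 >> 4 = 19
//
// which is then saturation-cast to OUTPUT_DATA_TYPE and, if MIN_BOUND/MAX_BOUND are defined,
// clamped to that range.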
1746
/* OpenCL kernel used to add the offset contribution after matrix multiplication and quantize down to uint8/int8.
 *
 * This kernel takes a final int32 accumulator value (the output of matrix multiplication), adds to it the offset contribution of matrix A and matrix B and quantizes to uint8/int8 through the output stage.
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001750 *
1751 *
1752 * @attention The k_offset = a_offset * b_offset * k (where k is the number of matrix A columns) needs to be passed at compile time using -DK_OFFSET (i.e. -DK_OFFSET=1200)
1753 * @note In case the offset contribution due to a_offset is required, a_offset needs to be passed at compile time using -DA_OFFSET (i.e. -DA_OFFSET=1)
1754 * @note In case the offset contribution due to b_offset is required, b_offset needs to be passed at compile time using -DB_OFFSET (i.e. -DB_OFFSET=6)
1755 * @note In case sum_col has batches, -DSUM_COL_HAS_BATCHES must be passed at compile time. Usually if gemmlowp is used to accelerate convolution layer, sum_col will not have batches
1756 *
1757 * The result before the output stage is:
1758 *
1759 * mm_result[i][k] = mm_result[i][k] +
1760 * (sum_col[k] * A_OFFSET) +
1761 * (sum_row[i] * B_OFFSET) +
1762 * (K_OFFSET)
1763 *
Manuel Bottini959c26d2019-12-02 16:22:35 +00001764 * This result is quantized down to uint8/int8 using the output stage. The output stage computes the following operations:
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001765 *
1766 * -# Compute fixed point multiplication between each entry of input by result_fixedpoint_multiplier
1767 * -# Add bias to final result if bias tensor is not a nullptr
1768 * -# Round to nearest division by a power-of-two using result_shift
1769 * -# Add offset to each result
1770 * -# Clamp the value between the specified min and max bounds
Manuel Bottini959c26d2019-12-02 16:22:35 +00001771 * -# Clamp the resulting int32 values:
1772 * - to the [0..255] range and cast to QASYMM8.
1773 * - to the [-128..127] range and cast to QASYMM8_SIGNED.
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001774 *
1775 * @attention The offset, scalar scale factor and number of bits to shift right of output tensor must be passed at compile time using -DRESULT_OFFSET, -RESULT_MULT_INT and -DRESULT_SHIFT
1776 *
1777 * @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
Manuel Bottini959c26d2019-12-02 16:22:35 +00001778 * @note The output datatype should be passed at compile time using -DOUTPUT_DATA_TYPE
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001779 * @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
1780 * These values can be used to implement "rectified linear unit" activation functions
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001781 * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
1782 * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001783 *
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001784 * @param[in] mm_result_ptr Pointer to the source tensor. Supported data type: S32
1785 * @param[in] mm_result_stride_x Stride of the source tensor in X dimension (in bytes)
1786 * @param[in] mm_result_step_x mm_result_stride_x * number of elements along X processed per workitem(in bytes)
1787 * @param[in] mm_result_stride_y Stride of the source tensor in Y dimension (in bytes)
1788 * @param[in] mm_result_step_y mm_result_stride_y * number of elements along Y processed per workitem(in bytes)
1789 * @param[in] mm_result_stride_z Stride of the source tensor in Z dimension (in bytes)
1790 * @param[in] mm_result_step_z mm_result_stride_z * number of elements along Z processed per workitem(in bytes)
1791 * @param[in] mm_result_offset_first_element_in_bytes The offset of the first element in the source tensor
1792 * @param[in] sum_col_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1793 * @param[in] sum_col_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1794 * @param[in] sum_col_step_x (Optional) sum_col_stride_x * number of elements along X processed per workitem(in bytes)
1795 * @param[in] sum_col_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1796 * @param[in] sum_col_step_y (Optional) sum_col_stride_y * number of elements along Y processed per workitem(in bytes)
1797 * @param[in] sum_col_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
1798 * @param[in] sum_row_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
1799 * @param[in] sum_row_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
1800 * @param[in] sum_row_step_x (Optional) sum_row_stride_x * number of elements along X processed per workitem(in bytes)
1801 * @param[in] sum_row_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
1802 * @param[in] sum_row_step_y (Optional) sum_row_stride_y * number of elements along Y processed per workitem(in bytes)
1803 * @param[in] sum_row_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
 1804 * @param[in] biases_ptr                                         (Optional) Pointer to the biases tensor. Supported data type: same as @p mm_result_ptr
1805 * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
1806 * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
1807 * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001808 * @param[out] dst_ptr                                            Pointer to the destination tensor. Supported data type: QASYMM8/QASYMM8_SIGNED
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001809 * @param[in] dst_stride_x                                       Stride of the destination tensor in X dimension (in bytes)
 1810 * @param[in] dst_step_x                                         dst_stride_x * number of elements along X processed per workitem(in bytes)
 1811 * @param[in] dst_stride_y                                       Stride of the destination tensor in Y dimension (in bytes)
 1812 * @param[in] dst_step_y                                         dst_stride_y * number of elements along Y processed per workitem(in bytes)
 1813 * @param[in] dst_stride_z                                       Stride of the destination tensor in Z dimension (in bytes)
 1814 * @param[in] dst_step_z                                         dst_stride_z * number of elements along Z processed per workitem(in bytes)
1815 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
1816 * @param[in] result_multipliers_ptr (Optional) Pointer to the output multipliers vector for per-channel quantization. Supported data types: S32
1817 * @param[in] result_multipliers_stride_x (Optional) Stride of the output multipliers vector in X dimension (in bytes)
1818 * @param[in] result_multipliers_step_x (Optional) output_multipliers_stride_x * number of elements along X processed per workitem(in bytes)
1819 * @param[in] result_multipliers_offset_first_element_in_bytes (Optional) The offset of the first element in the output multipliers vector
1820 * @param[in] result_shifts_ptr (Optional) Pointer to the output shifts vector for per-channel quantization. Supported data types: S32
1821 * @param[in] result_shifts_stride_x (Optional) Stride of the output shifts vector in X dimension (in bytes)
1822 * @param[in] result_shifts_step_x (Optional) output_shifts_stride_x * number of elements along X processed per workitem(in bytes)
1823 * @param[in] result_shifts_offset_first_element_in_bytes (Optional) The offset of the first element in the output shifts vector
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001824 */
1825__kernel void gemmlowp_offset_contribution_quantize_down_fixedpoint(TENSOR3D_DECLARATION(mm_result)
1826#if defined(A_OFFSET)
1827 ,
1828 IMAGE_DECLARATION(sum_col)
1829#endif // defined(A_OFFSET)
1830#if defined(B_OFFSET)
1831 ,
1832 IMAGE_DECLARATION(sum_row)
1833#endif // defined(B_OFFSET)
1834 ,
1835#if defined(ADD_BIAS)
1836 VECTOR_DECLARATION(biases),
1837#endif // defined(ADD_BIAS)
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001838 TENSOR3D_DECLARATION(dst)
1839#if defined(PER_CHANNEL_QUANTIZATION)
1840 ,
1841 VECTOR_DECLARATION(result_multipliers),
1842 VECTOR_DECLARATION(result_shifts)
1843#endif // defined(PER_CHANNEL_QUANTIZATION)
1844 )
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001845{
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001846 const int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001847 const int y = get_global_id(1);
1848 const int z = get_global_id(2);
1849
1850 // Compute offset contribution
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001851 VEC_INT offset_term_s32 = offset_contribution(
1852 x, y, z
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001853#if defined(A_OFFSET)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001854 ,
1855 sum_col_ptr,
1856 sum_col_stride_x,
1857 sum_col_step_x,
1858 sum_col_stride_y,
1859 sum_col_step_y,
1860 sum_col_offset_first_element_in_bytes
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001861#endif // defined(A_OFFSET)
1862#if defined(B_OFFSET)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001863 ,
1864 sum_row_ptr,
1865 sum_row_stride_x,
1866 sum_row_step_x,
1867 sum_row_stride_y,
1868 sum_row_step_y,
1869 sum_row_offset_first_element_in_bytes
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001870#endif // defined(B_OFFSET)
1871#if defined(ADD_BIAS)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001872 ,
1873 biases_ptr,
1874 biases_stride_x,
1875 biases_step_x,
1876 biases_offset_first_element_in_bytes
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001877#endif // defined(ADD_BIAS)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001878 );
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001879
1880 __global uchar *mm_result_addr = mm_result_ptr + mm_result_offset_first_element_in_bytes + x * sizeof(int) + y * mm_result_stride_y + z * mm_result_stride_z;
1881
1882 __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x + y * dst_stride_y + z * dst_stride_z;
1883
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001884 VEC_INT in_s32 = VLOAD(VEC_SIZE)(0, (__global int *)mm_result_addr);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001885
1886 // Add the offset terms to GEMM's result
1887 in_s32 += offset_term_s32;
1888
1889 // -------------- OUTPUT STAGE
1890
 1891    // Multiply by the quantization multiplier and shift
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001892#if defined(PER_CHANNEL_QUANTIZATION)
1893 __global uchar *result_multipliers_addr = result_multipliers_ptr + result_multipliers_offset_first_element_in_bytes + x * sizeof(int);
1894 __global uchar *result_shifts_addr = result_shifts_ptr + result_shifts_offset_first_element_in_bytes + x * sizeof(int);
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001895 VEC_INT result_multipliers_values = VLOAD(VEC_SIZE)(0, (__global int *)result_multipliers_addr);
1896 VEC_INT result_shifts_values = VLOAD(VEC_SIZE)(0, (__global int *)result_shifts_addr);
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001897
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001898 VEC_INT in_s32_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(in_s32, result_multipliers_values, result_shifts_values, VEC_SIZE);
1899 VEC_INT in_s32_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(in_s32, result_multipliers_values, result_shifts_values, VEC_SIZE);
1900 in_s32 = select(in_s32_shift_lt0, in_s32_shift_gt0, result_shifts_values >= 0);
Michele Di Giorgio14cbfb22019-10-23 10:53:10 +01001901#else // defined(PER_CHANNEL_QUANTIZATION)
1902
1903#if RESULT_SHIFT < 0
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001904 in_s32 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(in_s32, RESULT_MULTIPLIER, RESULT_SHIFT, VEC_SIZE);
Michele Di Giorgio14cbfb22019-10-23 10:53:10 +01001905#else // RESULT_SHIFT >= 0
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001906 in_s32 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(in_s32, RESULT_MULTIPLIER, RESULT_SHIFT, VEC_SIZE);
Michele Di Giorgio14cbfb22019-10-23 10:53:10 +01001907#endif // RESULT_SHIFT < 0
1908
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001909#endif // defined(PER_CHANNEL_QUANTIZATION)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001910
 1911    // Add the output offset
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001912 in_s32 += (VEC_INT)RESULT_OFFSET;
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001913
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001914 VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE)
1915 res0 = CONVERT_SAT(in_s32, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE));
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001916
1917#if defined(MIN_BOUND)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001918 res0 = max(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MIN_BOUND);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001919#endif // defined(MIN_BOUND)
1920#if defined(MAX_BOUND)
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001921 res0 = min(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MAX_BOUND);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001922#endif // defined(MAX_BOUND)
1923
1924 // Store the result
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001925 STORE_VECTOR_SELECT(res, OUTPUT_DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001926}
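// NOTE (illustrative only): the block below is a minimal scalar C sketch of the per-element math
// performed by the kernel above, assuming the per-tensor quantization path (no
// PER_CHANNEL_QUANTIZATION), ADD_BIAS enabled and a QASYMM8 output; the compile-time constants
// (-DA_OFFSET, -DB_OFFSET, -DK_OFFSET, -DRESULT_MULTIPLIER, -DRESULT_SHIFT, -DRESULT_OFFSET) are
// used directly. All function names are hypothetical and only mirror the behaviour of the
// ASYMM_MULT_BY_QUANT_MULTIPLIER_* helpers; the block is guarded with #if 0 so it is never
// compiled with the kernels.
#if 0
#include <stdint.h>

// Saturating, rounding, doubling high-half multiply: sat(round((a * b) / 2^31)).
int32_t sat_rounding_doubling_high_mul(int32_t a, int32_t b)
{
    if(a == INT32_MIN && b == INT32_MIN)
    {
        return INT32_MAX; // the only input pair that overflows
    }
    const int64_t ab    = (int64_t)a * (int64_t)b;
    const int64_t nudge = (ab >= 0) ? (1ll << 30) : (1 - (1ll << 30));
    return (int32_t)((ab + nudge) / (1ll << 31));
}

// Round-to-nearest division by 2^exponent, with ties rounded away from zero.
int32_t rounding_divide_by_pow2(int32_t x, int exponent)
{
    const int32_t mask      = (1 << exponent) - 1;
    const int32_t remainder = x & mask;
    const int32_t threshold = (mask >> 1) + ((x < 0) ? 1 : 0);
    return (x >> exponent) + ((remainder > threshold) ? 1 : 0);
}

// Fixed-point requantization, mirroring the RESULT_SHIFT < 0 / >= 0 branches of the kernel.
int32_t requantize(int32_t x, int32_t multiplier, int shift)
{
    if(shift < 0)
    {
        return sat_rounding_doubling_high_mul(x * (1 << -shift), multiplier);
    }
    return rounding_divide_by_pow2(sat_rounding_doubling_high_mul(x, multiplier), shift);
}

// Offset contribution followed by the fixed-point output stage for the element at (y, x).
uint8_t offset_contribution_quantize_down_fixedpoint_ref(int32_t mm_result, int32_t sum_col_x, int32_t sum_row_y, int32_t bias)
{
    int32_t acc = mm_result + sum_col_x * A_OFFSET + sum_row_y * B_OFFSET + K_OFFSET + bias; // K_OFFSET is the constant term (typically A_OFFSET * B_OFFSET * K)
    acc         = requantize(acc, RESULT_MULTIPLIER, RESULT_SHIFT);
    acc += RESULT_OFFSET;            // output zero-point
    acc = (acc < 0) ? 0 : acc;       // MIN_BOUND/MAX_BOUND, when defined, tighten this [0, 255] range
    acc = (acc > 255) ? 255 : acc;
    return (uint8_t)acc;
}
#endif // 0 (illustrative scalar reference)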
Michele Di Giorgiob54ba282020-01-14 15:31:55 +00001927#endif // defined(RESULT_OFFSET) && defined(RESULT_MULTIPLIER) && defined(RESULT_SHIFT) && defined(OUTPUT_DATA_TYPE)
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00001928
Michele Di Giorgio0bfe39f2020-10-21 11:36:21 +01001929#undef VEC_INT
1930
1931#endif // defined(K_OFFSET) && defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER)
Gian Marco05288a22017-11-21 10:57:50 +00001932
1933#if defined(RESULT_OFFSET) && defined(RESULT_MULT_INT) && defined(RESULT_SHIFT)
Luca Foschiani689c9682020-02-26 14:30:14 +00001934/** This OpenCL kernel is used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED
Gian Marco05288a22017-11-21 10:57:50 +00001935 *
Luca Foschiani689c9682020-02-26 14:30:14 +00001936 * This kernel takes a final int32 accumulator value and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value.
Gian Marco05288a22017-11-21 10:57:50 +00001937 * The following computations will be performed by the kernel:
1938 *
 1939 * -# Add bias to the result (if -DADD_BIAS is passed at compile time)
 1940 * -# Add offset terms to the result
 1941 * -# Multiply each entry of the result by result_mult_int
 1942 * -# Shift the int32 accumulator by result_shift
1943 * -# Clamp the value between the specified min and max bounds (if -DMIN_BOUND and/or -DMAX_BOUND are passed at compile time)
Luca Foschiani689c9682020-02-26 14:30:14 +00001944 * -# Clamp the resulting int32 values:
 1945 *    - to the [0..255] range and cast to QASYMM8.
 1946 *    - to the [-128..127] range and cast to QASYMM8_SIGNED.
Gian Marco05288a22017-11-21 10:57:50 +00001947 *
 1948 * @attention The offset, scalar scale factor and number of bits to shift right of output tensor must be passed at compile time using -DRESULT_OFFSET, -DRESULT_MULT_INT and -DRESULT_SHIFT
1949 *
1950 * @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
Luca Foschiani689c9682020-02-26 14:30:14 +00001951 * @note The output datatype should be passed at compile time using -DOUTPUT_DATA_TYPE
Gian Marco05288a22017-11-21 10:57:50 +00001952 * @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
1953 * These values can be used to implement "rectified linear unit" activation functions
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001954 * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
 * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
Gian Marco05288a22017-11-21 10:57:50 +00001955 *
1956 * @param[in] src_ptr Pointer to the source tensor. Supported data type: S32
1957 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
1958 * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
1959 * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
1960 * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
1961 * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
1962 * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
1963 * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001964 * @param[in] biases_ptr (Optional) Pointer to the biases tensor. Supported data type: same as @p src_ptr
1965 * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
1966 * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
1967 * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
Luca Foschiani689c9682020-02-26 14:30:14 +00001968 * @param[out] dst_ptr                               Pointer to the destination tensor. Supported data type: QASYMM8/QASYMM8_SIGNED
Gian Marco05288a22017-11-21 10:57:50 +00001969 * @param[in] dst_stride_x                          Stride of the destination tensor in X dimension (in bytes)
 1970 * @param[in] dst_step_x                            dst_stride_x * number of elements along X processed per workitem(in bytes)
 1971 * @param[in] dst_stride_y                          Stride of the destination tensor in Y dimension (in bytes)
 1972 * @param[in] dst_step_y                            dst_stride_y * number of elements along Y processed per workitem(in bytes)
 1973 * @param[in] dst_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
 1974 * @param[in] dst_step_z                            dst_stride_z * number of elements along Z processed per workitem(in bytes)
1975 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
1976 */
1977__kernel void gemmlowp_output_stage_quantize_down(TENSOR3D_DECLARATION(src),
1978#if defined(ADD_BIAS)
1979 VECTOR_DECLARATION(biases),
1980#endif // defined(ADD_BIAS)
1981 TENSOR3D_DECLARATION(dst))
1982{
1983 // Compute source and destination addresses
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001984 int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001985 int y = get_global_id(1);
1986 int z = get_global_id(2);
Gian Marco05288a22017-11-21 10:57:50 +00001987
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001988 __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(int) + y * src_stride_y + z * src_stride_z;
Gian Marco05288a22017-11-21 10:57:50 +00001989
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001990 __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x + y * dst_stride_y + z * dst_stride_z;
1991
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001992 VEC_DATA_TYPE(int, VEC_SIZE)
1993 input_values = VLOAD(VEC_SIZE)(0, (__global int *)src_addr);
Gian Marco58c57942017-11-28 09:10:03 +00001994
Gian Marco05288a22017-11-21 10:57:50 +00001995#if defined(ADD_BIAS)
1996 // Add bias
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001997 __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(int);
1998
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01001999 VEC_DATA_TYPE(int, VEC_SIZE)
2000 biases_values = VLOAD(VEC_SIZE)(0, (__global int *)bias_addr);
2001 input_values += biases_values;
Gian Marco05288a22017-11-21 10:57:50 +00002002#endif // defined(ADD_BIAS)
2003
Gian Marco Iodice4b908652018-10-18 10:21:02 +01002004    // Add the output offset
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002005 input_values += (VEC_DATA_TYPE(int, VEC_SIZE))RESULT_OFFSET;
Gian Marco Iodice4b908652018-10-18 10:21:02 +01002006
Georgios Pinitas45bcc3a2017-11-29 11:06:49 +00002007 // Multiply by result_mult_int and shift
Gian Marco58c57942017-11-28 09:10:03 +00002008 input_values *= RESULT_MULT_INT;
Gian Marco05288a22017-11-21 10:57:50 +00002009
Michele Di Giorgio14cbfb22019-10-23 10:53:10 +01002010#if RESULT_SHIFT < 0
2011 input_values >>= -RESULT_SHIFT;
2012#else // RESULT_SHIFT >= 0
Gian Marco58c57942017-11-28 09:10:03 +00002013 input_values >>= RESULT_SHIFT;
Michele Di Giorgio14cbfb22019-10-23 10:53:10 +01002014#endif // RESULT_SHIFT < 0
Gian Marco05288a22017-11-21 10:57:50 +00002015
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002016 VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE)
2017 res0 = CONVERT_SAT(input_values, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE));
Gian Marco05288a22017-11-21 10:57:50 +00002018
2019#if defined(MIN_BOUND)
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002020 res0 = max(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MIN_BOUND);
Gian Marco05288a22017-11-21 10:57:50 +00002021#endif // defined(MIN_BOUND)
2022#if defined(MAX_BOUND)
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002023 res0 = min(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MAX_BOUND);
Gian Marco05288a22017-11-21 10:57:50 +00002024#endif // defined(MAX_BOUND)
2025
2026 // Store the result
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002027 STORE_VECTOR_SELECT(res, OUTPUT_DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
Gian Marco05288a22017-11-21 10:57:50 +00002028}
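// NOTE (illustrative only): a minimal scalar C sketch of gemmlowp_output_stage_quantize_down
// above, assuming ADD_BIAS, a QASYMM8 output and RESULT_SHIFT >= 0; the function name and
// parameters are hypothetical, and MIN_BOUND/MAX_BOUND (when defined) would tighten the clamping
// range. Guarded with #if 0 so it is never compiled with the kernels.
#if 0
#include <stdint.h>

uint8_t output_stage_quantize_down_ref(int32_t acc, int32_t bias, int32_t result_offset, int32_t result_mult_int, int result_shift)
{
    acc += bias;                   // optional int32 bias
    acc += result_offset;          // RESULT_OFFSET
    acc *= result_mult_int;        // RESULT_MULT_INT
    acc >>= result_shift;          // arithmetic shift; this sketch assumes result_shift >= 0
    acc = (acc < 0) ? 0 : acc;     // clamp to the QASYMM8 range
    acc = (acc > 255) ? 255 : acc;
    return (uint8_t)acc;
}
#endif // 0 (illustrative scalar reference)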
Gian Marco58c57942017-11-28 09:10:03 +00002029#endif // defined(RESULT_OFFSET) && defined(RESULT_MULT_INT) && defined(RESULT_SHIFT)
2030
2031#if defined(RESULT_OFFSET_AFTER_SHIFT) && defined(RESULT_FIXEDPOINT_MULTIPLIER) && defined(RESULT_SHIFT)
Manuel Bottini959c26d2019-12-02 16:22:35 +00002032/** This OpenCL kernel is used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED
Gian Marco58c57942017-11-28 09:10:03 +00002033 *
Manuel Bottini959c26d2019-12-02 16:22:35 +00002034 * This kernel takes a final int32 accumulator value (the output of matrix multiplication), and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value.
Gian Marco58c57942017-11-28 09:10:03 +00002035 * The following computations will be performed by the kernel:
2036 *
 2037 * -# Add bias to each entry of the input if the bias tensor is not a nullptr
 2038 * -# Compute fixed point multiplication of each entry of the input by result_fixedpoint_multiplier
2039 * -# Round to nearest division by a power-of-two using result_shift
2040 * -# Add offset to each result
2041 * -# Clamp the value between the specified min and max bounds
Manuel Bottini1f332d42019-11-29 17:25:25 +00002042 * -# Clamp the resulting int32 values:
2043 * - to the [0..255] range and cast to QASYMM8.
2044 * - to the [-128..127] range and cast to QASYMM8_SIGNED.
Gian Marco58c57942017-11-28 09:10:03 +00002045 *
Gian Marco Iodice4b908652018-10-18 10:21:02 +01002046 * @attention The offset, scalar scale factor and number of bits to shift right of output tensor must be passed at compile time using -DRESULT_OFFSET_AFTER_SHIFT, -DRESULT_FIXEDPOINT_MULTIPLIER and -DRESULT_SHIFT
Gian Marco58c57942017-11-28 09:10:03 +00002047 *
2048 * @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
Manuel Bottini1f332d42019-11-29 17:25:25 +00002049 * @note The output datatype should be passed at compile time using -DOUTPUT_DATA_TYPE
Gian Marco58c57942017-11-28 09:10:03 +00002050 * @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
2051 * These values can be used to implement "rectified linear unit" activation functions
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002052 * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
2053 * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
Gian Marco58c57942017-11-28 09:10:03 +00002054 *
2055 * @param[in] src_ptr Pointer to the source tensor. Supported data type: S32
2056 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
2057 * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
2058 * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
2059 * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
2060 * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
2061 * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
2062 * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
Gian Marco Iodice4b908652018-10-18 10:21:02 +01002063 * @param[in] biases_ptr (Optional) Pointer to the biases tensor. Supported data type: same as @p src_ptr
2064 * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
2065 * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
2066 * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
Sheri Zhang0cdbda52020-02-25 15:57:21 +00002067 * @param[out] dst_ptr                               Pointer to the destination tensor. Supported data type: QASYMM8/QASYMM8_SIGNED
Gian Marco58c57942017-11-28 09:10:03 +00002068 * @param[in] dst_stride_x                          Stride of the destination tensor in X dimension (in bytes)
 2069 * @param[in] dst_step_x                            dst_stride_x * number of elements along X processed per workitem(in bytes)
 2070 * @param[in] dst_stride_y                          Stride of the destination tensor in Y dimension (in bytes)
 2071 * @param[in] dst_step_y                            dst_stride_y * number of elements along Y processed per workitem(in bytes)
 2072 * @param[in] dst_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
 2073 * @param[in] dst_step_z                            dst_stride_z * number of elements along Z processed per workitem(in bytes)
2074 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
2075 */
2076__kernel void gemmlowp_output_stage_quantize_down_fixedpoint(TENSOR3D_DECLARATION(src),
2077#if defined(ADD_BIAS)
2078 VECTOR_DECLARATION(biases),
2079#endif // defined(ADD_BIAS)
2080 TENSOR3D_DECLARATION(dst))
2081{
2082 // Compute source and destination addresses
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002083 int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
Gian Marco Iodice4b908652018-10-18 10:21:02 +01002084 int y = get_global_id(1);
2085 int z = get_global_id(2);
Georgios Pinitas932491f2018-09-21 16:33:15 +01002086
Gian Marco Iodice4b908652018-10-18 10:21:02 +01002087 __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(int) + y * src_stride_y + z * src_stride_z;
Gian Marco58c57942017-11-28 09:10:03 +00002088
Gian Marco Iodice4b908652018-10-18 10:21:02 +01002089 __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x + y * dst_stride_y + z * dst_stride_z;
2090
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002091 VEC_DATA_TYPE(int, VEC_SIZE)
2092 input_values = VLOAD(VEC_SIZE)(0, (__global int *)src_addr);
Gian Marco58c57942017-11-28 09:10:03 +00002093
2094#if defined(ADD_BIAS)
2095 // Add bias
Gian Marco Iodice4b908652018-10-18 10:21:02 +01002096 __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(int);
2097
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002098 VEC_DATA_TYPE(int, VEC_SIZE)
2099 biases_values = VLOAD(VEC_SIZE)(0, (__global int *)bias_addr);
2100 input_values += biases_values;
Gian Marco58c57942017-11-28 09:10:03 +00002101#endif // defined(ADD_BIAS)
2102
 2103    // Multiply by result_fixedpoint_multiplier and shift
Michele Di Giorgio14cbfb22019-10-23 10:53:10 +01002104#if RESULT_SHIFT < 0
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002105 input_values = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(input_values, RESULT_FIXEDPOINT_MULTIPLIER, RESULT_SHIFT, VEC_SIZE);
Michele Di Giorgio14cbfb22019-10-23 10:53:10 +01002106#else // RESULT_SHIFT >= 0
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002107 input_values = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(input_values, RESULT_FIXEDPOINT_MULTIPLIER, RESULT_SHIFT, VEC_SIZE);
Michele Di Giorgio14cbfb22019-10-23 10:53:10 +01002108#endif // RESULT_SHIFT < 0
Gian Marco58c57942017-11-28 09:10:03 +00002109
 2110    // Add the output offset
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002111 input_values += (VEC_DATA_TYPE(int, VEC_SIZE))RESULT_OFFSET_AFTER_SHIFT;
Gian Marco58c57942017-11-28 09:10:03 +00002112
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002113 VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE)
2114 res0 = CONVERT_SAT(input_values, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE));
Gian Marco58c57942017-11-28 09:10:03 +00002115
2116#if defined(MIN_BOUND)
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002117 res0 = max(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MIN_BOUND);
Gian Marco58c57942017-11-28 09:10:03 +00002118#endif // defined(MIN_BOUND)
2119#if defined(MAX_BOUND)
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002120 res0 = min(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MAX_BOUND);
Gian Marco58c57942017-11-28 09:10:03 +00002121#endif // defined(MAX_BOUND)
2122
2123 // Store the result
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002124 STORE_VECTOR_SELECT(res, OUTPUT_DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
Gian Marco58c57942017-11-28 09:10:03 +00002125}
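// NOTE (illustrative only): scalar C sketch of gemmlowp_output_stage_quantize_down_fixedpoint
// above, assuming ADD_BIAS and a QASYMM8 output. requantize() is the hypothetical helper sketched
// after gemmlowp_offset_contribution_quantize_down_fixedpoint earlier in this file; guarded with
// #if 0 so it is never compiled with the kernels.
#if 0
#include <stdint.h>

int32_t requantize(int32_t x, int32_t multiplier, int shift); // see the earlier sketch

uint8_t output_stage_quantize_down_fixedpoint_ref(int32_t acc, int32_t bias, int32_t result_fixedpoint_multiplier, int result_shift, int32_t result_offset_after_shift)
{
    acc  = requantize(acc + bias, result_fixedpoint_multiplier, result_shift);
    acc += result_offset_after_shift; // output zero-point, added after the multiply/shift
    acc  = (acc < 0) ? 0 : acc;       // use [-128, 127] instead for QASYMM8_SIGNED
    acc  = (acc > 255) ? 255 : acc;
    return (uint8_t)acc;
}
#endif // 0 (illustrative scalar reference)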
Chunosov5124be52017-11-22 20:42:13 +07002126#endif // defined(RESULT_OFFSET_AFTER_SHIFT) && defined(RESULT_FIXEDPOINT_MULTIPLIER) && defined(RESULT_SHIFT)
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002127
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002128#if defined(RESULT_FIXEDPOINT_MULTIPLIER) && defined(RESULT_SHIFT)
2129
Michalis Spyrou51146c52019-07-12 14:42:29 +01002130/** This OpenCL kernel is used to quantize down the int32 accumulator values of GEMMLowp to QSYMM16
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002131 *
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00002132 * This kernel takes a final int32 accumulator value (the output of matrix multiplication), and processes it to obtain the final QSYMM16 value.
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002133 * The following computations will be performed by the kernel:
2134 *
 2135 * -# Add bias to each entry of the input if the bias tensor is not a nullptr
 2136 * -# Compute fixed point multiplication of each entry of the input by result_fixedpoint_multiplier
 2137 * -# Round to nearest division by a power-of-two using result_shift
2139 * -# Clamp the value between the specified min and max bounds
2140 * -# Clamp the resulting int32 values to the [-32768..32767] range and cast to QSYMM16.
2141 *
2142 * @attention The offset, scalar scale factor and number of bits to shift right of output tensor must be passed at compile time using -DRESULT_FIXEDPOINT_MULTIPLIER and -DRESULT_SHIFT
2143 *
2144 * @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
2145 * @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
2146 * These values can be used to implement "rectified linear unit" activation functions
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002147 * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
2148 * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002149 *
2150 * @param[in] src_ptr Pointer to the source tensor. Supported data type: S32
2151 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
2152 * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
2153 * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
2154 * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
2155 * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
2156 * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
2157 * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
2158 * @param[in] biases_ptr (Optional) Pointer to the biases tensor. Supported data type: same as @p src_ptr
2159 * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
2160 * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
2161 * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
Sheri Zhangb18252d2020-04-07 11:04:57 +01002162 * @param[out] dst_ptr                               Pointer to the destination tensor. Supported data type: QSYMM16
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002163 * @param[in] dst_stride_x                          Stride of the destination tensor in X dimension (in bytes)
 2164 * @param[in] dst_step_x                            dst_stride_x * number of elements along X processed per workitem(in bytes)
 2165 * @param[in] dst_stride_y                          Stride of the destination tensor in Y dimension (in bytes)
 2166 * @param[in] dst_step_y                            dst_stride_y * number of elements along Y processed per workitem(in bytes)
 2167 * @param[in] dst_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
 2168 * @param[in] dst_step_z                            dst_stride_z * number of elements along Z processed per workitem(in bytes)
2169 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
2170 */
2171__kernel void gemmlowp_output_stage_quantize_down_fixedpoint_qsymm16(TENSOR3D_DECLARATION(src),
2172#if defined(ADD_BIAS)
2173 VECTOR_DECLARATION(biases),
2174#endif // defined(ADD_BIAS)
2175 TENSOR3D_DECLARATION(dst))
2176{
2177 // Compute source and destination addresses
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002178 int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002179 int y = get_global_id(1);
2180 int z = get_global_id(2);
2181
Michalis Spyrou51146c52019-07-12 14:42:29 +01002182 __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(int) + y * src_stride_y + z * src_stride_z;
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002183
Michele Di Giorgioba14c922020-10-12 13:27:57 +01002184 __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * sizeof(short) + y * dst_stride_y + z * dst_stride_z;
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002185
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002186 VEC_DATA_TYPE(int, VEC_SIZE)
2187 input_values = VLOAD(VEC_SIZE)(0, (__global int *)src_addr);
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002188
2189#if defined(ADD_BIAS)
2190 // Add bias
Michalis Spyrou51146c52019-07-12 14:42:29 +01002191 __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(int);
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002192
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002193 VEC_DATA_TYPE(int, VEC_SIZE)
2194 biases_values = VLOAD(VEC_SIZE)(0, (__global int *)bias_addr);
2195 input_values += biases_values;
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002196#endif // defined(ADD_BIAS)
2197
 2198    // Multiply by result_fixedpoint_multiplier and shift
Manuel Bottini07263982019-10-17 18:37:26 +01002199#if RESULT_SHIFT < 0
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002200 input_values = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(input_values, RESULT_FIXEDPOINT_MULTIPLIER, RESULT_SHIFT, VEC_SIZE);
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +00002201#else // RESULT_SHIFT >= 0
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002202 input_values = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(input_values, RESULT_FIXEDPOINT_MULTIPLIER, RESULT_SHIFT, VEC_SIZE);
Manuel Bottini07263982019-10-17 18:37:26 +01002203#endif // RESULT_SHIFT < 0
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002204
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002205 VEC_DATA_TYPE(short, VEC_SIZE)
2206 res0 = CONVERT_SAT(input_values, VEC_DATA_TYPE(short, VEC_SIZE));
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002207
2208#if defined(MIN_BOUND)
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002209 res0 = max(res0, (VEC_DATA_TYPE(short, VEC_SIZE))MIN_BOUND);
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002210#endif // defined(MIN_BOUND)
2211#if defined(MAX_BOUND)
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002212 res0 = min(res0, (VEC_DATA_TYPE(short, VEC_SIZE))MAX_BOUND);
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002213#endif // defined(MAX_BOUND)
2214
2215 // Store the result
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002216 STORE_VECTOR_SELECT(res, short, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
Manuel Bottini9c9b70b2019-07-01 17:35:56 +01002217}
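// NOTE (illustrative only): scalar C sketch of the QSYMM16 variant above; it differs from the
// QASYMM8/QASYMM8_SIGNED fixed-point stage only in that no output offset is added (symmetric
// quantization) and the result saturates to int16. requantize() is the hypothetical helper
// sketched earlier in this file; guarded with #if 0 so it is never compiled with the kernels.
#if 0
#include <stdint.h>

int32_t requantize(int32_t x, int32_t multiplier, int shift); // see the earlier sketch

int16_t output_stage_quantize_down_fixedpoint_qsymm16_ref(int32_t acc, int32_t bias, int32_t result_fixedpoint_multiplier, int result_shift)
{
    acc = requantize(acc + bias, result_fixedpoint_multiplier, result_shift);
    acc = (acc < INT16_MIN) ? INT16_MIN : acc; // MIN_BOUND/MAX_BOUND, when defined, tighten this range
    acc = (acc > INT16_MAX) ? INT16_MAX : acc;
    return (int16_t)acc;
}
#endif // 0 (illustrative scalar reference)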
2218#endif // defined(RESULT_FIXEDPOINT_MULTIPLIER) && defined(RESULT_SHIFT)
2219
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002220#if defined(REAL_MULTIPLIER) && defined(OUTPUT_OFFSET)
Sheri Zhang1b14c752020-03-09 14:29:52 +00002221/** This OpenCL kernel is used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002222 *
Sheri Zhang1b14c752020-03-09 14:29:52 +00002223 * This kernel takes a final int32 accumulator value (the output of matrix multiplication), and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value.
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002224 * The following computations will be performed by the kernel:
2225 *
 2226 * -# Add bias to each entry of the input if the bias tensor is not a nullptr
 2227 * -# Convert the int32 accumulator to float
 2228 * -# Multiply by the real, floating-point scale factor REAL_MULTIPLIER
 2229 * -# Add the output offset (OUTPUT_OFFSET) and round to the nearest integer
 2230 * -# Clamp the value between the specified min and max bounds
Sheri Zhang1b14c752020-03-09 14:29:52 +00002231 * -# Clamp the resulting int32 values:
2232 * - to the [0..255] range and cast to QASYMM8.
2233 * - to the [-128..127] range and cast to QASYMM8_SIGNED.
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002234 *
2235 * @attention The offset and scalar scale factor must be passed at compile time using -DRESULT_OFFSET, -DREAL_MULTIPLIER
2236 *
2237 * @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
Sheri Zhang1b14c752020-03-09 14:29:52 +00002238 * @note The output datatype should be passed at compile time using -DOUTPUT_DATA_TYPE
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002239 * @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
2240 * These values can be used to implement "rectified linear unit" activation functions
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002241 * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
2242 * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002243 *
2244 * @param[in] src_ptr Pointer to the source tensor. Supported data type: S32
2245 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
2246 * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
2247 * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
2248 * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
2249 * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
2250 * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
2251 * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
 2252 * @param[in] biases_ptr                            (Optional) Pointer to the biases tensor. Supported data type: same as @p src_ptr
 2253 * @param[in] biases_stride_x                       (Optional) Stride of the biases tensor in X dimension (in bytes)
 2254 * @param[in] biases_step_x                         (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
 2255 * @param[in] biases_offset_first_element_in_bytes  (Optional) The offset of the first element in the biases tensor
 2256 * @param[out] dst_ptr                              Pointer to the destination tensor. Supported data type: QASYMM8/QASYMM8_SIGNED
 2257 * @param[in] dst_stride_x                          Stride of the destination tensor in X dimension (in bytes)
 2258 * @param[in] dst_step_x                            dst_stride_x * number of elements along X processed per workitem(in bytes)
 2259 * @param[in] dst_stride_y                          Stride of the destination tensor in Y dimension (in bytes)
 2260 * @param[in] dst_step_y                            dst_stride_y * number of elements along Y processed per workitem(in bytes)
 2261 * @param[in] dst_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
 2262 * @param[in] dst_step_z                            dst_stride_z * number of elements along Z processed per workitem(in bytes)
 2263 * @param[in] dst_stride_w                          Stride of the destination tensor in W dimension (in bytes)
 2264 * @param[in] dst_step_w                            dst_stride_w * number of elements along W processed per workitem(in bytes)
2265 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
2266 */
2267__kernel void gemmlowp_output_stage_quantize_down_float(TENSOR3D_DECLARATION(src),
2268#if defined(ADD_BIAS)
2269 VECTOR_DECLARATION(biases),
2270#endif // defined(ADD_BIAS)
2271#if defined(DST_HEIGHT)
2272 TENSOR4D_DECLARATION(dst))
2273#else // defined(DST_HEIGHT)
2274 TENSOR3D_DECLARATION(dst))
2275#endif // defined(DST_HEIGHT)
2276{
2277 // Compute source and destination addresses
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002278 int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
Gian Marco Iodice0c54a622018-10-30 12:20:03 +00002279 int y = get_global_id(1);
2280 int z = get_global_id(2);
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002281
Gian Marco Iodice0c54a622018-10-30 12:20:03 +00002282 __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(int) + y * src_stride_y + z * src_stride_z;
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002283
Gian Marco Iodice0c54a622018-10-30 12:20:03 +00002284 __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x + y * dst_stride_y + z * dst_stride_z;
2285
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002286 VEC_DATA_TYPE(int, VEC_SIZE)
2287 input_values = VLOAD(VEC_SIZE)(0, (__global int *)src_addr);
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002288
2289#if defined(ADD_BIAS)
2290 // Add bias
Gian Marco Iodice0c54a622018-10-30 12:20:03 +00002291 __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(int);
2292
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002293 VEC_DATA_TYPE(int, VEC_SIZE)
2294 biases_values = VLOAD(VEC_SIZE)(0, (__global int *)bias_addr);
Gian Marco Iodice0c54a622018-10-30 12:20:03 +00002295    input_values += biases_values;
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002296#endif // defined(ADD_BIAS)
2297
2298 // Convert to float
Sheri Zhang1b14c752020-03-09 14:29:52 +00002299    VEC_DATA_TYPE(float, VEC_SIZE)
 2300    input_values_f = CONVERT(input_values, VEC_DATA_TYPE(float, VEC_SIZE));
    input_values_f = round(input_values_f * (float)REAL_MULTIPLIER + (float)OUTPUT_OFFSET);
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002301
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002302 VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE)
2303 res0 = CONVERT_SAT(input_values_f, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE));
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002304
2305#if defined(MIN_BOUND)
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002306 res0 = max(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MIN_BOUND);
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002307#endif // defined(MIN_BOUND)
2308#if defined(MAX_BOUND)
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002309 res0 = min(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MAX_BOUND);
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002310#endif // defined(MAX_BOUND)
2311
2312 // Store the result
Michele Di Giorgio671d4f02020-10-14 12:26:51 +01002313 STORE_VECTOR_SELECT(res, OUTPUT_DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
Georgios Pinitas51e53a32018-10-22 13:49:08 +01002314}
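// NOTE (illustrative only): scalar C sketch of gemmlowp_output_stage_quantize_down_float above,
// assuming ADD_BIAS and a QASYMM8 output; REAL_MULTIPLIER and OUTPUT_OFFSET are taken as
// parameters here and roundf() matches the half-away-from-zero rounding of OpenCL round().
// The function name is hypothetical; guarded with #if 0 so it is never compiled with the kernels.
#if 0
#include <math.h>
#include <stdint.h>

uint8_t output_stage_quantize_down_float_ref(int32_t acc, int32_t bias, float real_multiplier, float output_offset)
{
    float scaled = roundf((float)(acc + bias) * real_multiplier + output_offset);
    scaled = (scaled < 0.0f) ? 0.0f : scaled;     // clamp to the QASYMM8 range; MIN_BOUND/MAX_BOUND may tighten it
    scaled = (scaled > 255.0f) ? 255.0f : scaled;
    return (uint8_t)scaled;
}
#endif // 0 (illustrative scalar reference)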
Gian Marco Iodice27423f02020-08-12 14:12:28 +01002315#endif // defined(REAL_MULTIPLIER) && defined(OUTPUT_OFFSET)