/*
 * Copyright (c) 2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "gemm_helpers.h"
#include "helpers.h"
#include "helpers_asymm.h"
#include "repeat.h"

#define CONCAT(a, b) a##b

#if defined(IS_QUANTISED)

#define ARM_OFFSET1(a, b, c) \
    ({ \
        c += (ACC_DATA_TYPE)a * (ACC_DATA_TYPE)b; \
    })
#define ARM_OFFSET2(a, b, c) \
    ({ \
        c += (ACC_DATA_TYPE)a.s0 * (ACC_DATA_TYPE)b; \
        c += (ACC_DATA_TYPE)a.s1 * (ACC_DATA_TYPE)b; \
    })
#define ARM_OFFSET3(a, b, c) \
    ({ \
        ARM_OFFSET2(a, b, c); \
        c += (ACC_DATA_TYPE)a.s2 * (ACC_DATA_TYPE)b; \
    })
#define ARM_OFFSET4(a, b, c) \
    ({ \
        ARM_OFFSET3(a, b, c); \
        c += (ACC_DATA_TYPE)a.s3 * (ACC_DATA_TYPE)b; \
    })
#define ARM_OFFSET8(a, b, c) \
    ({ \
        ARM_OFFSET4((a.lo), (b), c); \
        ARM_OFFSET4((a.hi), (b), c); \
    })
#define ARM_OFFSET16(a, b, c) \
    ({ \
        ARM_OFFSET8((a.lo), (b), c); \
        ARM_OFFSET8((a.hi), (b), c); \
    })

#if N0 == 1
#define ARM_OFFSET_K0XN0(k0, a, b, a_offset, b_offset, c) \
    ({ \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##0), (a_offset), (c)); \
    })
#elif N0 == 2 // N0 == 2
#define ARM_OFFSET_K0XN0(k0, a, b, a_offset, b_offset, c) \
    ({ \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s0)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##0), (a_offset), (c.s0)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s1)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##1), (a_offset), (c.s1)); \
    })
#elif N0 == 3 // N0 == 3
#define ARM_OFFSET_K0XN0(k0, a, b, a_offset, b_offset, c) \
    ({ \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s0)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##0), (a_offset), (c.s0)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s1)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##1), (a_offset), (c.s1)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s2)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##2), (a_offset), (c.s2)); \
    })
#elif N0 == 4 // N0 == 4
#define ARM_OFFSET_K0XN0(k0, a, b, a_offset, b_offset, c) \
    ({ \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s0)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##0), (a_offset), (c.s0)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s1)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##1), (a_offset), (c.s1)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s2)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##2), (a_offset), (c.s2)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s3)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##3), (a_offset), (c.s3)); \
    })
#elif N0 == 8 // N0 == 8
#define ARM_OFFSET_K0XN0(k0, a, b, a_offset, b_offset, c) \
    ({ \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s0)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##0), (a_offset), (c.s0)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s1)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##1), (a_offset), (c.s1)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s2)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##2), (a_offset), (c.s2)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s3)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##3), (a_offset), (c.s3)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s4)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##4), (a_offset), (c.s4)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s5)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##5), (a_offset), (c.s5)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s6)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##6), (a_offset), (c.s6)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s7)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##7), (a_offset), (c.s7)); \
    })
#elif N0 == 16 // N0 == 16
#define ARM_OFFSET_K0XN0(k0, a, b, a_offset, b_offset, c) \
    ({ \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s0)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##0), (a_offset), (c.s0)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s1)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##1), (a_offset), (c.s1)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s2)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##2), (a_offset), (c.s2)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s3)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##3), (a_offset), (c.s3)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s4)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##4), (a_offset), (c.s4)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s5)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##5), (a_offset), (c.s5)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s6)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##6), (a_offset), (c.s6)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s7)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##7), (a_offset), (c.s7)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s8)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##8), (a_offset), (c.s8)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.s9)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##9), (a_offset), (c.s9)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.sA)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##A), (a_offset), (c.sA)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.sB)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##B), (a_offset), (c.sB)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.sC)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##C), (a_offset), (c.sC)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.sD)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##D), (a_offset), (c.sD)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.sE)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##E), (a_offset), (c.sE)); \
        CONCAT(ARM_OFFSET, k0) \
        ((a), (b_offset), (c.sF)); \
        CONCAT(ARM_OFFSET, k0) \
        ((b##F), (a_offset), (c.sF)); \
    })
#else // N0 not supported
#error "N0 value not supported"
#endif // N0 conditions
#else // defined(IS_QUANTISED)
#define ARM_OFFSET_K0XN0(k0, a, b, a_offset, b_offset, c) \
    ({})
#endif // defined(IS_QUANTISED)
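
/* A minimal sketch of the identity behind the ARM_OFFSET* helpers above (SRC_OFFSET and
 * WEI_OFFSET are the per-tensor offsets documented for this kernel; the sign convention of
 * the values passed by the host is not assumed here). For a block of K accumulations the
 * kernel effectively computes
 *
 *   sum_k (a[k] + SRC_OFFSET) * (b[k] + WEI_OFFSET)
 *     =   sum_k a[k] * b[k]            // ARM_DOT_K0XN0 below
 *       + WEI_OFFSET * sum_k a[k]      // first CONCAT(ARM_OFFSET, k0) call per lane
 *       + SRC_OFFSET * sum_k b[k]      // second CONCAT(ARM_OFFSET, k0) call per lane
 *       + K * SRC_OFFSET * WEI_OFFSET  // constant term, added once per filter tap in the kernel body
 */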

#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) && defined(IS_QUANTISED)
#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#define ARM_DOT(x, y, val) val = arm_dot_acc((x), (y), (val));
#else // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#define ARM_DOT(x, y, val) val += arm_dot((x), (y));
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)

#define ARM_DOT1(a, b, c) \
    ({ \
        ARM_DOT((VEC_DATA_TYPE(SRC_DATA_TYPE, 4))(a, (VEC_DATA_TYPE(SRC_DATA_TYPE, 3))0), (VEC_DATA_TYPE(WEI_DATA_TYPE, 4))(b, (VEC_DATA_TYPE(WEI_DATA_TYPE, 3))0), c); \
    })
#define ARM_DOT2(a, b, c) \
    ({ \
        ARM_DOT((VEC_DATA_TYPE(SRC_DATA_TYPE, 4))(a, (VEC_DATA_TYPE(SRC_DATA_TYPE, 2))0), (VEC_DATA_TYPE(WEI_DATA_TYPE, 4))(b, (VEC_DATA_TYPE(WEI_DATA_TYPE, 2))0), c); \
    })
#define ARM_DOT3(a, b, c) \
    ({ \
        ARM_DOT((VEC_DATA_TYPE(SRC_DATA_TYPE, 4))(a, (SRC_DATA_TYPE)0), (VEC_DATA_TYPE(WEI_DATA_TYPE, 4))(b, (WEI_DATA_TYPE)0), c); \
    })
#define ARM_DOT4(a, b, c) \
    ({ \
        ARM_DOT(a, b, c); \
    })
#define ARM_DOT8(a, b, c) \
    ({ \
        ARM_DOT4((a.lo), (b.lo), c); \
        ARM_DOT4((a.hi), (b.hi), c); \
    })
#define ARM_DOT16(a, b, c) \
    ({ \
        ARM_DOT8((a.lo), (b.lo), c); \
        ARM_DOT8((a.hi), (b.hi), c); \
    })

#else // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) && defined(IS_QUANTISED)

#define ARM_DOT1(a, b, c) \
    ({ \
        c += (ACC_DATA_TYPE)a * (ACC_DATA_TYPE)b; \
    })
#define ARM_DOT2(a, b, c) \
    ({ \
        c += (ACC_DATA_TYPE)a.s0 * (ACC_DATA_TYPE)b.s0; \
        c += (ACC_DATA_TYPE)a.s1 * (ACC_DATA_TYPE)b.s1; \
    })
#define ARM_DOT3(a, b, c) \
    ({ \
        ARM_DOT2(a, b, c); \
        c += (ACC_DATA_TYPE)a.s2 * (ACC_DATA_TYPE)b.s2; \
    })
#define ARM_DOT4(a, b, c) \
    ({ \
        ARM_DOT3(a, b, c); \
        c += (ACC_DATA_TYPE)a.s3 * (ACC_DATA_TYPE)b.s3; \
    })
#define ARM_DOT8(a, b, c) \
    ({ \
        ARM_DOT4((a.lo), (b.lo), c); \
        ARM_DOT4((a.hi), (b.hi), c); \
    })
#define ARM_DOT16(a, b, c) \
    ({ \
        ARM_DOT8((a.lo), (b.lo), c); \
        ARM_DOT8((a.hi), (b.hi), c); \
    })
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) && defined(IS_QUANTISED)
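
/* Illustration (not generated code): with N0 == 2 and K0 == 4, ARM_DOT_K0XN0(4, a, b, c)
 * expands to
 *
 *   ARM_DOT4((a), (b0), (c.s0));
 *   ARM_DOT4((a), (b1), (c.s1));
 *
 * i.e. every accumulator lane receives the K0-wide dot product of the same source vector a
 * with one of the weights vectors b0..b1. When cl_arm_integer_dot_product_int8 (and the
 * accumulate variant) is available, each ARM_DOT4 maps onto a single arm_dot()/arm_dot_acc()
 * call; otherwise the scalar multiply-accumulate fallback above is used.
 */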

#if N0 == 1
#define ARM_DOT_K0XN0(k0, a, b, c) \
    ({ \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##0), (c)); \
    })
#elif N0 == 2 // N0 == 2
#define ARM_DOT_K0XN0(k0, a, b, c) \
    ({ \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##0), (c.s0)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##1), (c.s1)); \
    })
#elif N0 == 3 // N0 == 3
#define ARM_DOT_K0XN0(k0, a, b, c) \
    ({ \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##0), (c.s0)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##1), (c.s1)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##2), (c.s2)); \
    })
#elif N0 == 4 // N0 == 4
#define ARM_DOT_K0XN0(k0, a, b, c) \
    ({ \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##0), (c.s0)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##1), (c.s1)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##2), (c.s2)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##3), (c.s3)); \
    })
#elif N0 == 8 // N0 == 8
#define ARM_DOT_K0XN0(k0, a, b, c) \
    ({ \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##0), (c.s0)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##1), (c.s1)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##2), (c.s2)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##3), (c.s3)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##4), (c.s4)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##5), (c.s5)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##6), (c.s6)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##7), (c.s7)); \
    })
#elif N0 == 16 // N0 == 16
#define ARM_DOT_K0XN0(k0, a, b, c) \
    ({ \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##0), (c.s0)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##1), (c.s1)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##2), (c.s2)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##3), (c.s3)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##4), (c.s4)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##5), (c.s5)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##6), (c.s6)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##7), (c.s7)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##8), (c.s8)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##9), (c.s9)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##A), (c.sA)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##B), (c.sB)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##C), (c.sC)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##D), (c.sD)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##E), (c.sE)); \
        CONCAT(ARM_DOT, k0) \
        ((a), (b##F), (c.sF)); \
    })
#else // N0 not supported
#error "N0 value not supported"
#endif // N0 conditions

/** OpenCL kernel to compute the direct convolution.
 *
 * @note Data layout supported: NHWC
 * @note Data type supported: F32/F16/QASYMM8
 * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=half)
 * @note The accumulation data type must be passed at compile time using -DACC_DATA_TYPE (e.g. -DACC_DATA_TYPE=half)
 * @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
 * @note The convolution strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y (e.g. -DSTRIDE_X=2, -DSTRIDE_Y=2)
 * @note The spatial dimensions of the weights must be passed at compile time using -DWEI_WIDTH and -DWEI_HEIGHT (e.g. -DWEI_WIDTH=9, -DWEI_HEIGHT=9)
 * @note The spatial dimensions of the source tensor must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT (e.g. -DSRC_WIDTH=96, -DSRC_HEIGHT=64)
 * @note The spatial dimensions of the destination tensor must be passed at compile time using -DDST_WIDTH and -DDST_HEIGHT (e.g. -DDST_WIDTH=96, -DDST_HEIGHT=64)
 * @note The channels of the source tensor must be passed at compile time using -DSRC_CHANNELS (e.g. -DSRC_CHANNELS=64)
 * @note The channels of the destination tensor must be passed at compile time using -DDST_CHANNELS (e.g. -DDST_CHANNELS=64)
 * @note The data type of the source tensor must be passed at compile time using -DSRC_DATA_TYPE (e.g. -DSRC_DATA_TYPE=float)
 * @note The data type of the weights tensor must be passed at compile time using -DWEI_DATA_TYPE (e.g. -DWEI_DATA_TYPE=float)
 * @note The data type of the destination tensor must be passed at compile time using -DDST_DATA_TYPE (e.g. -DDST_DATA_TYPE=float)
 * @note The data type of the accumulators must be passed at compile time using -DACC_DATA_TYPE (e.g. -DACC_DATA_TYPE=float)
 * @note The number of M0 rows (width*height) to process must be passed at compile time using -DM0 (e.g. -DM0=2)
 * @note The number of N0 output channels to process must be passed at compile time using -DN0 (e.g. -DN0=2)
 * @note The number of K0 inner accumulations must be passed at compile time using -DK0 (e.g. -DK0=2)
 * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
 * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
 * @note Only the following configurations of M0, N0 and K0 are currently supported:
 *  - M0 = 1
 *  - N0 = 2, 3, 4, 8, 16
 *  - K0 = 2, 3, 4, 8, 16
 *
 * @note In case of QASYMM8, the following extra information must be passed at compile time:
 *  - -DIS_QUANTISED
 *  - The destination quantization multiplier e.g. -DDST_MULTIPLIER=1234
 *  - The destination quantization shift e.g. -DDST_SHIFT=4
 *  - The destination offset e.g. -DDST_OFFSET=4
 *  - The source offset e.g. -DSRC_OFFSET=4
 *  - The weights offset e.g. -DWEI_OFFSET=4
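 *
 * @note Purely illustrative example (the real option set is assembled by the host-side kernel
 *       configuration, and the values below are assumptions, not a tested configuration): an F16
 *       build for a 3x3 kernel with stride 1 and no padding could pass
 *       -DDATA_TYPE=half -DSRC_DATA_TYPE=half -DWEI_DATA_TYPE=half -DDST_DATA_TYPE=half
 *       -DACC_DATA_TYPE=half -DWEI_WIDTH=3 -DWEI_HEIGHT=3 -DSRC_WIDTH=64 -DSRC_HEIGHT=64
 *       -DSRC_CHANNELS=32 -DDST_WIDTH=62 -DDST_HEIGHT=62 -DDST_CHANNELS=16
 *       -DSTRIDE_X=1 -DSTRIDE_Y=1 -DPAD_LEFT=0 -DPAD_TOP=0 -DM0=1 -DN0=4 -DK0=4
 *       together with the PARTIAL_STORE_M0 / PARTIAL_STORE_N0 values described above.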
 *
 * @param[in] src_ptr Pointer to the source tensor. Supported data type: F16/F32/QASYMM8
 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
 * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
 * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
 * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem (in bytes)
 * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
 * @param[out] dst_ptr Pointer to the destination tensor. Supported data type: same as @p src_ptr
 * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
 * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
 * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
 * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem (in bytes)
 * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
 * @param[in] wei_ptr Pointer to the weights tensor. Supported data type: same as @p src_ptr
 * @param[in] wei_stride_x Stride of the weights tensor in X dimension (in bytes)
 * @param[in] wei_step_x wei_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] wei_stride_y Stride of the weights tensor in Y dimension (in bytes)
 * @param[in] wei_step_y wei_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in] wei_stride_z Stride of the weights tensor in Z dimension (in bytes)
 * @param[in] wei_step_z wei_stride_z * number of elements along Z processed per workitem (in bytes)
 * @param[in] wei_offset_first_element_in_bytes The offset of the first element in the weights tensor
 * @param[in] bia_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p src_ptr (if F32/F16) or S32 (if QASYMM8)
 * @param[in] bia_stride_x (Optional) Stride of the bias tensor in X dimension (in bytes)
 * @param[in] bia_step_x (Optional) bia_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] bia_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
 * @param[in] wei_stride_w Stride of the weights tensor in W dimension (in bytes)
 */
__kernel void direct_convolution_nhwc(
    TENSOR3D_DECLARATION(src),
    TENSOR3D_DECLARATION(dst),
    TENSOR3D_DECLARATION(wei),
#if defined(HAS_BIAS)
    VECTOR_DECLARATION(bia),
#endif // defined(HAS_BIAS)
    unsigned int wei_stride_w)
{
#if M0 != 1
#error "M0: Only M0=1 is supported"
#endif // M0 != 1

    const int cout = max((int)(get_global_id(0) * N0 - (N0 - PARTIAL_STORE_N0) % N0), 0); // output channels
    const int mout = get_global_id(1); // width x height
    const int zout = get_global_id(2); // batch size index

    REPEAT_VAR_INIT_TO_CONST(16, int, zero, 0);
    REPEAT_VAR_INIT_TO_CONST(M0, int, xi, 0);
    REPEAT_VAR_INIT_TO_CONST(M0, int, yi, 0);

#define LINEAR_2_COORDS(i) \
    xi##i = ((mout * M0 + i) % DST_WIDTH) * STRIDE_X; \
    yi##i = ((mout * M0 + i) / DST_WIDTH) * STRIDE_Y; \
    xi##i -= PAD_LEFT; \
    yi##i -= PAD_TOP;

    // Convert the linear index to coordinate
    LINEAR_2_COORDS(0);

#undef LINEAR_2_COORDS
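
    /* Worked example (assuming, for illustration, DST_WIDTH = 62, STRIDE_X = STRIDE_Y = 1,
     * PAD_LEFT = PAD_TOP = 0 and M0 = 1): mout = 65 gives xi0 = (65 % 62) * 1 - 0 = 3 and
     * yi0 = (65 / 62) * 1 - 0 = 1, i.e. the top-left source coordinate for the output pixel
     * at destination position (x = 3, y = 1). With non-zero padding the coordinates can go
     * negative; that case is handled by the mask computed in MI_INIT below.
     */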

    uint src_offset = src_offset_first_element_in_bytes + zout * src_stride_y * (SRC_WIDTH * SRC_HEIGHT);
    uint wei_offset = wei_offset_first_element_in_bytes + cout * wei_stride_w;

    // Initialize the accumulators
    REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(ACC_DATA_TYPE, N0), c, 0);

    for(int i = 0; i < (WEI_WIDTH * WEI_HEIGHT); ++i)
    {
        int tmp = 0;
        int xk  = i % WEI_WIDTH;
        int yk  = i / WEI_WIDTH;

        REPEAT_VAR_INIT_TO_CONST(M0, int, mi_valid_row, 0);
        REPEAT_VAR_INIT_TO_CONST(M0, int, mi_mask, 1);

        // Calculate the input row to read from source tensor
#define MI_INIT(i) \
    tmp             = xi##i + xk + (yi##i + yk) * SRC_WIDTH; \
    mi_valid_row##i = max(min(xi##i + xk, SRC_WIDTH - 1), 0) + max(min(yi##i + yk, SRC_HEIGHT - 1), 0) * SRC_WIDTH; \
    if(tmp == mi_valid_row##i) \
        mi_mask##i = 1; \
    else \
        mi_mask##i = 0;

        MI_INIT(0);

#undef MI_INIT
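
        /* mi_mask0 is 1 only when the un-clamped index tmp equals the clamped index, i.e. when
         * (xi0 + xk, yi0 + yk) lies inside the source plane. An out-of-bounds tap (for example
         * xi0 + xk == -1 with PAD_LEFT = 1) still yields an in-range, clamped row index, but its
         * mask is 0 so the indirect load below can discard the contribution; this is how the
         * implicit zero padding is realised without branching on the address computation.
         */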

        int k = 0;
        for(; k <= (SRC_CHANNELS - K0); k += K0)
        {
            // Load values from src tensor
            LOAD_BLOCK_INDIRECT(M0, K0, SRC_DATA_TYPE, a, src_ptr, src_offset + k * sizeof(SRC_DATA_TYPE), src_stride_y, mi_valid_row, mi_mask);

            // Load values from weights tensor
            LOAD_BLOCK(N0, K0, WEI_DATA_TYPE, b, wei_ptr, wei_offset, wei_stride_w, zero);

#define TENSOR_DOT(i) \
    ARM_DOT_K0XN0(K0, a##i, b, c##i); \
    ARM_OFFSET_K0XN0(K0, a##i, b, SRC_OFFSET, WEI_OFFSET, c##i);

            TENSOR_DOT(0);

#undef TENSOR_DOT

            wei_offset += K0 * sizeof(WEI_DATA_TYPE);
        }

#if (SRC_CHANNELS % K0) != 0
        // Left-over accumulations
        for(; k < SRC_CHANNELS; ++k)
        {
            // Load values from src tensor
            LOAD_BLOCK_INDIRECT(M0, 1, SRC_DATA_TYPE, a, src_ptr, src_offset + k * sizeof(SRC_DATA_TYPE), src_stride_y, mi_valid_row, mi_mask);

            // Load values from weights tensor
            LOAD_BLOCK(N0, 1, WEI_DATA_TYPE, b, wei_ptr, wei_offset, wei_stride_w, zero);

#define TENSOR_DOT(i) \
    ARM_DOT_K0XN0(1, a##i, b, c##i); \
    ARM_OFFSET_K0XN0(1, a##i, b, SRC_OFFSET, WEI_OFFSET, c##i);

            TENSOR_DOT(0);

#undef TENSOR_DOT

            wei_offset += sizeof(WEI_DATA_TYPE);
        }
#endif // (SRC_CHANNELS % K0) != 0

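        // Constant term of the quantised-offset expansion (see the sketch above): K * SRC_OFFSET * WEI_OFFSET
        // with K = SRC_CHANNELS, added once per filter tap. It vanishes when either offset is zero.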
        c0 += (SRC_CHANNELS * SRC_OFFSET * WEI_OFFSET);
    }

    __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (cout * sizeof(DST_DATA_TYPE)) + (mout * M0 * dst_stride_y);

    // Batched direct convolution
    dst_addr += zout * dst_stride_y * (DST_WIDTH * DST_HEIGHT);

#if defined(HAS_BIAS)
    __global uchar *bias_addr = bia_ptr + bia_offset_first_element_in_bytes + (cout * sizeof(BIA_DATA_TYPE));

    LOAD_BLOCK(1, N0, BIA_DATA_TYPE, bias, bias_addr, 0, zero0, zero);

    // c = c + bias[broadcasted]
    ADD_BLOCK_BROADCAST(M0, c, bias0);
#endif // defined(HAS_BIAS)

#if defined(IS_QUANTISED)

    REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DST_DATA_TYPE, N0), cq, 0);

#if DST_SHIFT < 0
#define QUANTISE(i) \
    c##i  = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(c##i, DST_MULTIPLIER, DST_SHIFT, N0); \
    c##i  = c##i + DST_OFFSET; \
    cq##i = CONVERT_SAT(c##i, VEC_DATA_TYPE(DST_DATA_TYPE, N0));
#else // DST_SHIFT < 0
#define QUANTISE(i) \
    c##i  = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(c##i, DST_MULTIPLIER, DST_SHIFT, N0); \
    c##i  = c##i + DST_OFFSET; \
    cq##i = CONVERT_SAT(c##i, VEC_DATA_TYPE(DST_DATA_TYPE, N0));
#endif // DST_SHIFT < 0

    QUANTISE(0);

#undef QUANTISE
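
    /* Both QUANTISE variants implement the usual fixed-point requantisation: roughly
     * cq = convert_sat(c * DST_MULTIPLIER * 2^-31 * 2^-DST_SHIFT + DST_OFFSET), with the
     * GREATER_THAN_ONE variant folding the left shift (DST_SHIFT < 0) into the multiplication.
     * This is a descriptive sketch of the helpers_asymm.h macros, not their exact rounding behaviour.
     */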

    STORE_VECTOR_SELECT(cq, DST_DATA_TYPE, dst_addr, N0, PARTIAL_STORE_N0, PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0);
#else // defined(IS_QUANTISED)
    STORE_VECTOR_SELECT(c, DST_DATA_TYPE, dst_addr, N0, PARTIAL_STORE_N0, PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0);
#endif // defined(IS_QUANTISED)
}