/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "helpers_asymm.h"

// All kernels below require the full set of quantization parameters to be
// passed as compile-time build options.
#if defined(WEIGHTS_OFFSET) && defined(INPUT_OFFSET) && defined(K_OFFSET) && defined(OUTPUT_OFFSET) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT)

// Optional fused activation: when FUSED_ACTIVATION is defined, route each
// output vector through the QASYMM8 activation implementation; otherwise
// ACTIVATION_FUNC is the identity.
#if defined(FUSED_ACTIVATION)
#define DATA_TYPE uchar
#ifndef VEC_SIZE
#define VEC_SIZE 8
#endif /* VEC_SIZE */
#include "activation_layer_qa8.cl"
#define ACTIVATION_FUNC(x) PERFORM_ACTIVATION_QA8(FUSED_ACTIVATION, x)
#else /* defined(FUSED_ACTIVATION) */
#define ACTIVATION_FUNC(x) (x)
#endif /* defined(FUSED_ACTIVATION) */

// NCHW path: only compiled when both convolution strides are provided.
#if defined(CONV_STRIDE_Y) && defined(CONV_STRIDE_X)

// Only horizontal strides 1, 2 and 3 are implemented (see GET_VALUES below).
#if CONV_STRIDE_X > 3
#error "Stride X not supported"
#endif /* CONV_STRIDE_X > 3 */
Dmitry Savenkod7295b72017-11-20 22:00:08 +070045
/* GET_VALUES(first_value, left, middle, right)
 *
 * Loads one input row starting at byte pointer `first_value` and produces the
 * three int8 vectors needed by a 3-wide horizontal filter for 8 output pixels:
 *   left   = inputs aligned with the filter tap at x-1
 *   middle = inputs aligned with the filter tap at x
 *   right  = inputs aligned with the filter tap at x+1
 * The number of input elements consumed per variant depends on CONV_STRIDE_X
 * (10 for stride 1, 17 for stride 2, 24 for stride 3), hence the different
 * vload widths and swizzle patterns.
 */
#if CONV_STRIDE_X == 1
#define GET_VALUES(first_value, left, middle, right)                              \
    ({                                                                            \
        int8 temp0 = CONVERT(vload8(0, first_value), int8);                       \
        int2 temp1 = CONVERT(vload2(0, (first_value + 8 * sizeof(uchar))), int2); \
                                                                                  \
        left   = CONVERT(temp0.s01234567, int8);                                  \
        middle = CONVERT((int8)(temp0.s1234, temp0.s567, temp1.s0), int8);        \
        right  = CONVERT((int8)(temp0.s2345, temp0.s67, temp1.s01), int8);        \
    })
#elif CONV_STRIDE_X == 2
#define GET_VALUES(first_value, left, middle, right)                 \
    ({                                                               \
        int16 temp0 = CONVERT(vload16(0, first_value), int16);       \
        int temp1   = CONVERT(*(first_value + 16 * sizeof(uchar)), int); \
                                                                     \
        left   = CONVERT(temp0.s02468ace, int8);                     \
        middle = CONVERT(temp0.s13579bdf, int8);                     \
        right  = CONVERT((int8)(temp0.s2468, temp0.sace, temp1), int8); \
    })
#else /* CONV_STRIDE_X */
#define GET_VALUES(first_value, left, middle, right)                               \
    ({                                                                             \
        int16 temp0 = CONVERT(vload16(0, first_value), int16);                     \
        int8 temp1  = CONVERT(vload8(0, (first_value + 16 * sizeof(uchar))), int8); \
                                                                                   \
        left   = CONVERT((int8)(temp0.s0369, temp0.scf, temp1.s25), int8);         \
        middle = CONVERT((int8)(temp0.s147a, temp0.sd, temp1.s036), int8);         \
        right  = CONVERT((int8)(temp0.s258b, temp0.se, temp1.s147), int8);         \
    })
#endif /* CONV_STRIDE_X */
77
/** This function computes the depthwise convolution quantized.
 *
 * @param[in] src_ptr                               Pointer to the source image. Supported data types: QASYMM8
 * @param[in] src_stride_x                          Stride of the source image in X dimension (in bytes)
 * @param[in] src_step_x                            src_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] src_stride_y                          Stride of the source image in Y dimension (in bytes)
 * @param[in] src_step_y                            src_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in] src_offset_first_element_in_bytes     The offset of the first element in the source image
 * @param[in] src_stride_z                          Stride of the source tensor in Z dimension (in bytes)
 * @param[in] src_step_z                            src_stride_z * number of elements along Y processed per workitem(in bytes)
 * @param[in] dst_ptr                               Pointer to the destination tensor. Supported data types: QASYMM8
 * @param[in] dst_stride_x                          Stride of the destination tensor in X dimension (in bytes)
 * @param[in] dst_step_x                            dst_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] dst_stride_y                          Stride of the destination tensor in Y dimension (in bytes)
 * @param[in] dst_step_y                            dst_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in] dst_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
 * @param[in] dst_step_z                            dst_stride_z * number of elements along Y processed per workitem(in bytes)
 * @param[in] dst_offset_first_element_in_bytes     The offset of the first element in the destination tensor
 * @param[in] weights_ptr                           Pointer to the weights tensor. Supported data types: QASYMM8
 * @param[in] weights_stride_x                      Stride of the weights tensor in X dimension (in bytes)
 * @param[in] weights_step_x                        weights_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] weights_stride_y                      Stride of the weights tensor in Y dimension (in bytes)
 * @param[in] weights_step_y                        weights_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in] weights_stride_z                      Stride of the weights tensor in Z dimension (in bytes)
 * @param[in] weights_step_z                        weights_stride_z * number of elements along Y processed per workitem(in bytes)
 * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
 * @param[in] biases_ptr                            (Optional) Pointer to the biases vector. Supported data types: QASYMM8
 * @param[in] biases_stride_x                       (Optional) Stride of the biases vector in X dimension (in bytes)
 * @param[in] biases_step_x                         (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] biases_offset_first_element_in_bytes  (Optional) The offset of the first element in the biases vector
 */

__kernel void depthwise_convolution_3x3_quantized_nchw(
    TENSOR3D_DECLARATION(src),
    TENSOR3D_DECLARATION(dst),
    TENSOR3D_DECLARATION(weights)
#if defined(HAS_BIAS)
    ,
    VECTOR_DECLARATION(biases)
#endif //defined(HAS_BIAS)
)
{
    Image    src     = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(src);
    Image    dst     = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
    Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT(weights);
#if defined(HAS_BIAS)
    Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);

    // One bias per channel; the channel index is the Z global id.
    int bias_value = *((__global int *)(vector_offset(&biases, get_global_id(2))));
#endif //defined(HAS_BIAS)

    // Load the 3x3 filter for this channel, one 3-element row per vload.
    uchar3 w0 = vload3(0, weights.ptr + 0 * weights_stride_y);
    uchar3 w1 = vload3(0, weights.ptr + 1 * weights_stride_y);
    uchar3 w2 = vload3(0, weights.ptr + 2 * weights_stride_y);

    // values0 accumulates 8 output pixels of output row 0; sum0 accumulates the
    // corresponding input sums, needed later for the WEIGHTS_OFFSET correction.
    int8 values0 = 0;
    int8 sum0    = 0;
#if CONV_STRIDE_Y == 1
    // With vertical stride 1 a second output row is produced per work-item,
    // reusing the input rows loaded for row 0.
    int8 values1 = 0;
    int8 sum1    = 0;
#endif /* CONV_STRIDE_Y */

    // Row0
    int8 left, middle, right;
    GET_VALUES(src.ptr + 0 * src_stride_y, left, middle, right);
    values0 += left * (int8)(w0.s0);
    values0 += middle * (int8)(w0.s1);
    values0 += right * (int8)(w0.s2);

#if WEIGHTS_OFFSET != 0
    sum0 += left + middle + right;
#endif /* WEIGHTS_OFFSET != 0 */

    // Row1: contributes to output row 0 via w1 and (stride_y==1) to output row 1 via w0.
    GET_VALUES(src.ptr + 1 * src_stride_y, left, middle, right);
    values0 += left * (int8)(w1.s0);
    values0 += middle * (int8)(w1.s1);
    values0 += right * (int8)(w1.s2);
#if CONV_STRIDE_Y == 1
    values1 += left * (int8)(w0.s0);
    values1 += middle * (int8)(w0.s1);
    values1 += right * (int8)(w0.s2);
#endif /* CONV_STRIDE_Y == 1 */

#if WEIGHTS_OFFSET != 0
    int8 tmp = left + middle + right;
    sum0 += tmp;
#if CONV_STRIDE_Y == 1
    sum1 += tmp;
#endif /* CONV_STRIDE_Y == 1 */
#endif /* WEIGHTS_OFFSET != 0 */

    // Row2: last row for output row 0; middle row for output row 1.
    GET_VALUES(src.ptr + 2 * src_stride_y, left, middle, right);
    values0 += left * (int8)(w2.s0);
    values0 += middle * (int8)(w2.s1);
    values0 += right * (int8)(w2.s2);
#if CONV_STRIDE_Y == 1
    values1 += left * (int8)(w1.s0);
    values1 += middle * (int8)(w1.s1);
    values1 += right * (int8)(w1.s2);
#endif /* CONV_STRIDE_Y == 1 */

#if WEIGHTS_OFFSET != 0
    tmp = left + middle + right;
    sum0 += tmp;
#if CONV_STRIDE_Y == 1
    sum1 += tmp;
#endif /* CONV_STRIDE_Y == 1 */
#endif /* WEIGHTS_OFFSET != 0 */

#if CONV_STRIDE_Y == 1
    // Row3: only needed by the second output row.
    GET_VALUES(src.ptr + 3 * src_stride_y, left, middle, right);
    values1 += left * (int8)(w2.s0);
    values1 += middle * (int8)(w2.s1);
    values1 += right * (int8)(w2.s2);

#if WEIGHTS_OFFSET != 0
    sum1 += left + middle + right;
#endif /* WEIGHTS_OFFSET != 0 */
#endif /* CONV_STRIDE_Y == 1 */

#if defined(HAS_BIAS)
    values0 += (int8)(bias_value);
#if CONV_STRIDE_Y == 1
    values1 += (int8)(bias_value);
#endif /* CONV_STRIDE_Y == 1 */
#endif //defined(HAS_BIAS)

    // Asymmetric-quantization cross terms: weights offset times input sums...
#if WEIGHTS_OFFSET != 0
    values0 += sum0 * (int8)(WEIGHTS_OFFSET);
#if CONV_STRIDE_Y == 1
    values1 += sum1 * (int8)(WEIGHTS_OFFSET);
#endif /* CONV_STRIDE_Y == 1 */
#endif /* WEIGHTS_OFFSET != 0 */

    // ...input offset times the sum of all nine weights...
#if INPUT_OFFSET != 0
    ushort  sum_weights = 0;
    ushort3 tmp_we      = convert_ushort3(w0) + convert_ushort3(w1) + convert_ushort3(w2);
    sum_weights += tmp_we.s0 + tmp_we.s1 + tmp_we.s2;
    values0 += sum_weights * (int8)(INPUT_OFFSET);
#if CONV_STRIDE_Y == 1
    values1 += sum_weights * (int8)(INPUT_OFFSET);
#endif /* CONV_STRIDE_Y == 1 */
#endif /* INPUT_OFFSET != 0 */

    // ...and the constant K_OFFSET term (offsets product folded at build time).
#if K_OFFSET != 0
    values0 += (int8)(K_OFFSET);
#if CONV_STRIDE_Y == 1
    values1 += (int8)(K_OFFSET);
#endif /* CONV_STRIDE_Y == 1 */
#endif /* K_OFFSET != 0 */

    // Requantize to QASYMM8: fixed-point multiply/shift, add output offset,
    // saturate to [0, 255], then apply the (optional) fused activation.
    values0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
    values0 += (int8)OUTPUT_OFFSET;
    uchar8 res0 = convert_uchar8_sat(values0);
    res0        = max(res0, (uchar8)0);
    res0        = min(res0, (uchar8)255);

    vstore8(ACTIVATION_FUNC(res0), 0, dst.ptr);
#if CONV_STRIDE_Y == 1

    values1 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
    values1 += (int8)OUTPUT_OFFSET;
    uchar8 res1 = convert_uchar8_sat(values1);
    res1        = max(res1, (uchar8)0);
    res1        = min(res1, (uchar8)255);

    vstore8(ACTIVATION_FUNC(res1), 0, dst.ptr + dst_stride_y);
#endif /* CONV_STRIDE_Y == 1 */
}
Giorgio Arena287b5702018-02-16 11:01:04 +0000250
#endif /* defined(CONV_STRIDE_Y) && defined(CONV_STRIDE_X) */

// NHWC path: compiled only when the vector width, source depth, top padding
// and padded-rows count are provided as build options.
#if defined(VEC_SIZE) && defined(SRC_DEPTH) && defined(CONV_PAD_TOP) && defined(ROWS_READ)

// Convenience wrapper binding the requantization helper to VEC_SIZE lanes.
#define asymm_mult_by_quant_multiplier_less_than_one(x, y, z) ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(x, y, z, VEC_SIZE)

#define VEC_INT VEC_DATA_TYPE(int, VEC_SIZE)
#define VEC_UCHAR VEC_DATA_TYPE(uchar, VEC_SIZE)

// Per-lane multiply-accumulate on the first four lanes, with the uchar
// operands widened to ushort before the multiply.
// NOTE(review): only lanes .s0-.s3 are accumulated here, so this appears to
// assume VEC_SIZE == 4 — confirm against the kernel's launch configuration.
#define BIFROST_MAD_4(acc, x, y)            \
    ({                                      \
        acc.s0 += (ushort)x.s0 * (ushort)y.s0; \
        acc.s1 += (ushort)x.s1 * (ushort)y.s1; \
        acc.s2 += (ushort)x.s2 * (ushort)y.s2; \
        acc.s3 += (ushort)x.s3 * (ushort)y.s3; \
    })

// Same as BIFROST_MAD_4, but also accumulates the raw input values into `sum`
// (needed for the WEIGHTS_OFFSET correction); compiled out when the weights
// offset is zero.
#if WEIGHTS_OFFSET != 0
#define BIFROST_MAD_ACC_4(acc, sum, x, y) \
    ({                                    \
        sum += CONVERT(x, VEC_INT);       \
        BIFROST_MAD_4(acc, x, y);         \
    })
#else /* WEIGHTS_OFFSET != 0 */
#define BIFROST_MAD_ACC_4(acc, sum, x, y) BIFROST_MAD_4(acc, x, y)
#endif /* WEIGHTS_OFFSET != 0 */
277
/** This function computes the depthwise convolution quantized.
 *
 * @param[in] src_ptr                               Pointer to the source image. Supported data types: QASYMM8
 * @param[in] src_stride_x                          Stride of the source image in X dimension (in bytes)
 * @param[in] src_step_x                            src_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] src_stride_y                          Stride of the source image in Y dimension (in bytes)
 * @param[in] src_step_y                            src_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in] src_offset_first_element_in_bytes     The offset of the first element in the source image
 * @param[in] src_stride_z                          Stride of the source tensor in Z dimension (in bytes)
 * @param[in] src_step_z                            src_stride_z * number of elements along Y processed per workitem(in bytes)
 * @param[in] dst_ptr                               Pointer to the destination tensor. Supported data types: QASYMM8
 * @param[in] dst_stride_x                          Stride of the destination tensor in X dimension (in bytes)
 * @param[in] dst_step_x                            dst_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] dst_stride_y                          Stride of the destination tensor in Y dimension (in bytes)
 * @param[in] dst_step_y                            dst_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in] dst_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
 * @param[in] dst_step_z                            dst_stride_z * number of elements along Y processed per workitem(in bytes)
 * @param[in] dst_offset_first_element_in_bytes     The offset of the first element in the destination tensor
 * @param[in] weights_ptr                           Pointer to the weights tensor. Supported data types: QASYMM8
 * @param[in] weights_stride_x                      Stride of the weights tensor in X dimension (in bytes)
 * @param[in] weights_step_x                        weights_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] weights_stride_y                      Stride of the weights tensor in Y dimension (in bytes)
 * @param[in] weights_step_y                        weights_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in] weights_stride_z                      Stride of the weights tensor in Z dimension (in bytes)
 * @param[in] weights_step_z                        weights_stride_z * number of elements along Y processed per workitem(in bytes)
 * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
 * @param[in] biases_ptr                            (Optional) Pointer to the biases vector. Supported data types: QASYMM8
 * @param[in] biases_stride_x                       (Optional) Stride of the biases vector in X dimension (in bytes)
 * @param[in] biases_step_x                         (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] biases_offset_first_element_in_bytes  (Optional) The offset of the first element in the biases vector
 */

__kernel void depthwise_convolution_3x3_quantized_nhwc_stride1(
    TENSOR3D_DECLARATION(src),
    TENSOR3D_DECLARATION(dst),
    TENSOR3D_DECLARATION(weights),
#if defined(HAS_BIAS)
    VECTOR_DECLARATION(biases)
#endif /* defined(HAS_BIAS) */
)
{
    Image  dst     = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
    Vector weights = CONVERT_TO_VECTOR_STRUCT(weights);
#if defined(HAS_BIAS)
    Vector biases = CONVERT_TO_VECTOR_STRUCT(biases);

    // VEC_SIZE per-channel int32 biases, one lane per channel.
    VEC_INT bias_values = VLOAD(VEC_SIZE)(0, (__global int *)biases.ptr);
#endif /* defined(HAS_BIAS) */

    __global uchar *first_elem = src_ptr + src_offset_first_element_in_bytes;

    const int z         = get_global_id(2);
    // pad_offs points before the valid data; reads redirected there pick up
    // the ROWS_READ padded rows instead of out-of-bounds memory.
    const int pad_offs  = -ROWS_READ * src_stride_y;
    // Offsets of the three consecutive input planes covered by the 3-tap
    // filter along Z, shifted up by the top padding.
    const int src_offs0 = get_global_id(0) * src_step_x + get_global_id(1) * src_step_y + z * src_step_z - CONV_PAD_TOP * src_stride_z;
    const int src_offs1 = src_offs0 + src_stride_z;
    const int src_offs2 = src_offs1 + src_stride_z;

    // Does the first/last plane fall outside the source? If so, read from the
    // padded region instead (select() below).
    const int cond_top    = z - CONV_PAD_TOP < 0;
    const int cond_bottom = z * (src_step_z / src_stride_z) + 2 >= SRC_DEPTH;

    __global uchar *src_addr0 = first_elem + select(src_offs0, pad_offs, cond_top);
    __global uchar *src_addr1 = first_elem + src_offs1;
    __global uchar *src_addr2 = first_elem + select(src_offs2, pad_offs, cond_bottom);

    // acc0..acc3: four outputs per work-item (stride 1 -> six input rows are
    // shared between them); sum0..sum3: matching input sums for the
    // WEIGHTS_OFFSET correction; sum_we: sum of all weights for INPUT_OFFSET.
    VEC_INT sum_we = 0;
    VEC_INT acc0 = 0, acc1 = 0, acc2 = 0, acc3 = 0;
    VEC_INT sum0 = 0, sum1 = 0, sum2 = 0, sum3 = 0;

    // z == 0: first weight plane against input plane 0.
    VEC_UCHAR w0, w1, w2;
    w0 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y);
    w1 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y);
    w2 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y);

#if INPUT_OFFSET != 0
    sum_we += CONVERT(w0, VEC_INT) + CONVERT(w1, VEC_INT) + CONVERT(w2, VEC_INT);
#endif /* INPUT_OFFSET != 0 */

    // Six consecutive input rows feed the four overlapping 3-row windows:
    // row r contributes to outputs max(0, r-2)..min(3, r) via taps w2..w0.
    VEC_UCHAR values = VLOAD(VEC_SIZE)(0, src_addr0);
    BIFROST_MAD_ACC_4(acc0, sum0, values, w0);

    src_addr0 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr0);
    BIFROST_MAD_ACC_4(acc0, sum0, values, w1);
    BIFROST_MAD_ACC_4(acc1, sum1, values, w0);

    src_addr0 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr0);
    BIFROST_MAD_ACC_4(acc0, sum0, values, w2);
    BIFROST_MAD_ACC_4(acc1, sum1, values, w1);
    BIFROST_MAD_ACC_4(acc2, sum2, values, w0);

    src_addr0 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr0);
    BIFROST_MAD_ACC_4(acc1, sum1, values, w2);
    BIFROST_MAD_ACC_4(acc2, sum2, values, w1);
    BIFROST_MAD_ACC_4(acc3, sum3, values, w0);

    src_addr0 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr0);
    BIFROST_MAD_ACC_4(acc2, sum2, values, w2);
    BIFROST_MAD_ACC_4(acc3, sum3, values, w1);

    src_addr0 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr0);
    BIFROST_MAD_ACC_4(acc3, sum3, values, w2);

    weights.ptr += weights_stride_z;

    // z == 1: second weight plane against input plane 1 (same row pattern).
    w0 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y);
    w1 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y);
    w2 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y);

#if INPUT_OFFSET != 0
    sum_we += CONVERT(w0, VEC_INT) + CONVERT(w1, VEC_INT) + CONVERT(w2, VEC_INT);
#endif /* INPUT_OFFSET != 0 */

    values = VLOAD(VEC_SIZE)(0, src_addr1);
    BIFROST_MAD_ACC_4(acc0, sum0, values, w0);

    src_addr1 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr1);
    BIFROST_MAD_ACC_4(acc0, sum0, values, w1);
    BIFROST_MAD_ACC_4(acc1, sum1, values, w0);

    src_addr1 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr1);
    BIFROST_MAD_ACC_4(acc0, sum0, values, w2);
    BIFROST_MAD_ACC_4(acc1, sum1, values, w1);
    BIFROST_MAD_ACC_4(acc2, sum2, values, w0);

    src_addr1 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr1);
    BIFROST_MAD_ACC_4(acc1, sum1, values, w2);
    BIFROST_MAD_ACC_4(acc2, sum2, values, w1);
    BIFROST_MAD_ACC_4(acc3, sum3, values, w0);

    src_addr1 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr1);
    BIFROST_MAD_ACC_4(acc2, sum2, values, w2);
    BIFROST_MAD_ACC_4(acc3, sum3, values, w1);

    src_addr1 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr1);
    BIFROST_MAD_ACC_4(acc3, sum3, values, w2);

    weights.ptr += weights_stride_z;

    // z == 2: third weight plane against input plane 2 (same row pattern).
    w0 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y);
    w1 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y);
    w2 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y);

#if INPUT_OFFSET != 0
    sum_we += CONVERT(w0, VEC_INT) + CONVERT(w1, VEC_INT) + CONVERT(w2, VEC_INT);
#endif /* INPUT_OFFSET != 0 */

    values = VLOAD(VEC_SIZE)(0, src_addr2);
    BIFROST_MAD_ACC_4(acc0, sum0, values, w0);

    src_addr2 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr2);
    BIFROST_MAD_ACC_4(acc0, sum0, values, w1);
    BIFROST_MAD_ACC_4(acc1, sum1, values, w0);

    src_addr2 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr2);
    BIFROST_MAD_ACC_4(acc0, sum0, values, w2);
    BIFROST_MAD_ACC_4(acc1, sum1, values, w1);
    BIFROST_MAD_ACC_4(acc2, sum2, values, w0);

    src_addr2 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr2);
    BIFROST_MAD_ACC_4(acc1, sum1, values, w2);
    BIFROST_MAD_ACC_4(acc2, sum2, values, w1);
    BIFROST_MAD_ACC_4(acc3, sum3, values, w0);

    src_addr2 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr2);
    BIFROST_MAD_ACC_4(acc2, sum2, values, w2);
    BIFROST_MAD_ACC_4(acc3, sum3, values, w1);

    src_addr2 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr2);
    BIFROST_MAD_ACC_4(acc3, sum3, values, w2);

#if defined(HAS_BIAS)
    acc0 += bias_values;
    acc1 += bias_values;
    acc2 += bias_values;
    acc3 += bias_values;
#endif /* defined(HAS_BIAS) */

    // Asymmetric-quantization correction terms (same scheme as the NCHW kernel).
#if WEIGHTS_OFFSET != 0
    acc0 += WEIGHTS_OFFSET * sum0;
    acc1 += WEIGHTS_OFFSET * sum1;
    acc2 += WEIGHTS_OFFSET * sum2;
    acc3 += WEIGHTS_OFFSET * sum3;
#endif /* WEIGHTS_OFFSET != 0 */

#if INPUT_OFFSET != 0
    VEC_INT offs = INPUT_OFFSET * sum_we;

    acc0 += offs;
    acc1 += offs;
    acc2 += offs;
    acc3 += offs;
#endif /* INPUT_OFFSET != 0 */

#if K_OFFSET != 0
    acc0 += (VEC_INT)K_OFFSET;
    acc1 += (VEC_INT)K_OFFSET;
    acc2 += (VEC_INT)K_OFFSET;
    acc3 += (VEC_INT)K_OFFSET;
#endif /* K_OFFSET != 0 */

    // Requantize: fixed-point multiply/shift, output offset, saturate to uchar.
    acc0 = asymm_mult_by_quant_multiplier_less_than_one(acc0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT);
    acc1 = asymm_mult_by_quant_multiplier_less_than_one(acc1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT);
    acc2 = asymm_mult_by_quant_multiplier_less_than_one(acc2, OUTPUT_MULTIPLIER, OUTPUT_SHIFT);
    acc3 = asymm_mult_by_quant_multiplier_less_than_one(acc3, OUTPUT_MULTIPLIER, OUTPUT_SHIFT);

    acc0 += (VEC_INT)OUTPUT_OFFSET;
    acc1 += (VEC_INT)OUTPUT_OFFSET;
    acc2 += (VEC_INT)OUTPUT_OFFSET;
    acc3 += (VEC_INT)OUTPUT_OFFSET;

    VEC_UCHAR res0 = CONVERT_SAT(acc0, VEC_UCHAR);
    VEC_UCHAR res1 = CONVERT_SAT(acc1, VEC_UCHAR);
    VEC_UCHAR res2 = CONVERT_SAT(acc2, VEC_UCHAR);
    VEC_UCHAR res3 = CONVERT_SAT(acc3, VEC_UCHAR);

    res0 = CLAMP(res0, (VEC_UCHAR)0, (VEC_UCHAR)255);
    res1 = CLAMP(res1, (VEC_UCHAR)0, (VEC_UCHAR)255);
    res2 = CLAMP(res2, (VEC_UCHAR)0, (VEC_UCHAR)255);
    res3 = CLAMP(res3, (VEC_UCHAR)0, (VEC_UCHAR)255);

    // Four consecutive output rows per work-item.
    VSTORE(VEC_SIZE)
    (res0, 0, dst.ptr + 0 * dst_stride_y);
    VSTORE(VEC_SIZE)
    (res1, 0, dst.ptr + 1 * dst_stride_y);
    VSTORE(VEC_SIZE)
    (res2, 0, dst.ptr + 2 * dst_stride_y);
    VSTORE(VEC_SIZE)
    (res3, 0, dst.ptr + 3 * dst_stride_y);
}
524
/** This function computes the depthwise convolution quantized.
 *
 * NHWC variant for stride 2: each work-item produces two output rows (acc0 and
 * acc2) from five input rows per plane; the intermediate windows present in the
 * stride-1 kernel (acc1/acc3) are skipped.
 *
 * @param[in] src_ptr                               Pointer to the source image. Supported data types: QASYMM8
 * @param[in] src_stride_x                          Stride of the source image in X dimension (in bytes)
 * @param[in] src_step_x                            src_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] src_stride_y                          Stride of the source image in Y dimension (in bytes)
 * @param[in] src_step_y                            src_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in] src_offset_first_element_in_bytes     The offset of the first element in the source image
 * @param[in] src_stride_z                          Stride of the source tensor in Z dimension (in bytes)
 * @param[in] src_step_z                            src_stride_z * number of elements along Y processed per workitem(in bytes)
 * @param[in] dst_ptr                               Pointer to the destination tensor. Supported data types: QASYMM8
 * @param[in] dst_stride_x                          Stride of the destination tensor in X dimension (in bytes)
 * @param[in] dst_step_x                            dst_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] dst_stride_y                          Stride of the destination tensor in Y dimension (in bytes)
 * @param[in] dst_step_y                            dst_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in] dst_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
 * @param[in] dst_step_z                            dst_stride_z * number of elements along Y processed per workitem(in bytes)
 * @param[in] dst_offset_first_element_in_bytes     The offset of the first element in the destination tensor
 * @param[in] weights_ptr                           Pointer to the weights tensor. Supported data types: QASYMM8
 * @param[in] weights_stride_x                      Stride of the weights tensor in X dimension (in bytes)
 * @param[in] weights_step_x                        weights_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] weights_stride_y                      Stride of the weights tensor in Y dimension (in bytes)
 * @param[in] weights_step_y                        weights_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in] weights_stride_z                      Stride of the weights tensor in Z dimension (in bytes)
 * @param[in] weights_step_z                        weights_stride_z * number of elements along Y processed per workitem(in bytes)
 * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
 * @param[in] biases_ptr                            (Optional) Pointer to the biases vector. Supported data types: QASYMM8
 * @param[in] biases_stride_x                       (Optional) Stride of the biases vector in X dimension (in bytes)
 * @param[in] biases_step_x                         (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] biases_offset_first_element_in_bytes  (Optional) The offset of the first element in the biases vector
 */

__kernel void depthwise_convolution_3x3_quantized_nhwc_stride2(
    TENSOR3D_DECLARATION(src),
    TENSOR3D_DECLARATION(dst),
    TENSOR3D_DECLARATION(weights),
#if defined(HAS_BIAS)
    VECTOR_DECLARATION(biases)
#endif /* defined(HAS_BIAS) */
)
{
    Image  dst     = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
    Vector weights = CONVERT_TO_VECTOR_STRUCT(weights);
#if defined(HAS_BIAS)
    Vector biases = CONVERT_TO_VECTOR_STRUCT(biases);

    // VEC_SIZE per-channel int32 biases, one lane per channel.
    VEC_INT bias_values = VLOAD(VEC_SIZE)(0, (__global int *)biases.ptr);
#endif /* defined(HAS_BIAS) */

    __global uchar *first_elem = src_ptr + src_offset_first_element_in_bytes;

    const int z         = get_global_id(2);
    // pad_offs points before the valid data; reads redirected there pick up
    // the ROWS_READ padded rows instead of out-of-bounds memory.
    const int pad_offs  = -ROWS_READ * src_stride_y;
    // Offsets of the three consecutive input planes covered by the 3-tap
    // filter along Z, shifted up by the top padding.
    const int src_offs0 = get_global_id(0) * src_step_x + get_global_id(1) * src_step_y + z * src_step_z - CONV_PAD_TOP * src_stride_z;
    const int src_offs1 = src_offs0 + src_stride_z;
    const int src_offs2 = src_offs1 + src_stride_z;

    // Does the first/last plane fall outside the source? If so, read from the
    // padded region instead (select() below).
    // Fix: removed a stray empty statement (";") left after the cond_bottom
    // declaration in the previous revision.
    const int cond_top    = z - CONV_PAD_TOP < 0;
    const int cond_bottom = z * (src_step_z / src_stride_z) + 2 >= SRC_DEPTH;

    __global uchar *src_addr0 = first_elem + select(src_offs0, pad_offs, cond_top);
    __global uchar *src_addr1 = first_elem + src_offs1;
    __global uchar *src_addr2 = first_elem + select(src_offs2, pad_offs, cond_bottom);

    // Two outputs per work-item (stride 2): acc0 uses input rows 0-2, acc2
    // uses rows 2-4; sum0/sum2 gather input sums for the WEIGHTS_OFFSET
    // correction; sum_we gathers the weight sums for INPUT_OFFSET.
    VEC_INT sum_we = 0;
    VEC_INT acc0 = 0, acc2 = 0;
    VEC_INT sum0 = 0, sum2 = 0;

    // z == 0: first weight plane against input plane 0.
    VEC_UCHAR w0, w1, w2;
    w0 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y);
    w1 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y);
    w2 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y);

#if INPUT_OFFSET != 0
    sum_we += CONVERT(w0, VEC_INT) + CONVERT(w1, VEC_INT) + CONVERT(w2, VEC_INT);
#endif /* INPUT_OFFSET != 0 */

    VEC_UCHAR values = VLOAD(VEC_SIZE)(0, src_addr0);
    BIFROST_MAD_ACC_4(acc0, sum0, values, w0);

    src_addr0 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr0);
    BIFROST_MAD_ACC_4(acc0, sum0, values, w1);

    // Row 2 is shared: last tap of the first window, first tap of the second.
    src_addr0 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr0);
    BIFROST_MAD_ACC_4(acc0, sum0, values, w2);
    BIFROST_MAD_ACC_4(acc2, sum2, values, w0);

    src_addr0 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr0);
    BIFROST_MAD_ACC_4(acc2, sum2, values, w1);

    src_addr0 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr0);
    BIFROST_MAD_ACC_4(acc2, sum2, values, w2);

    weights.ptr += weights_stride_z;

    // z == 1: second weight plane against input plane 1 (same row pattern).
    w0 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y);
    w1 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y);
    w2 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y);

#if INPUT_OFFSET != 0
    sum_we += CONVERT(w0, VEC_INT) + CONVERT(w1, VEC_INT) + CONVERT(w2, VEC_INT);
#endif /* INPUT_OFFSET != 0 */

    values = VLOAD(VEC_SIZE)(0, src_addr1);
    BIFROST_MAD_ACC_4(acc0, sum0, values, w0);

    src_addr1 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr1);
    BIFROST_MAD_ACC_4(acc0, sum0, values, w1);

    src_addr1 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr1);
    BIFROST_MAD_ACC_4(acc0, sum0, values, w2);
    BIFROST_MAD_ACC_4(acc2, sum2, values, w0);

    src_addr1 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr1);
    BIFROST_MAD_ACC_4(acc2, sum2, values, w1);

    src_addr1 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr1);
    BIFROST_MAD_ACC_4(acc2, sum2, values, w2);

    weights.ptr += weights_stride_z;

    // z == 2: third weight plane against input plane 2 (same row pattern).
    w0 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y);
    w1 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y);
    w2 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y);

#if INPUT_OFFSET != 0
    sum_we += CONVERT(w0, VEC_INT) + CONVERT(w1, VEC_INT) + CONVERT(w2, VEC_INT);
#endif /* INPUT_OFFSET != 0 */

    values = VLOAD(VEC_SIZE)(0, src_addr2);
    BIFROST_MAD_ACC_4(acc0, sum0, values, w0);

    src_addr2 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr2);
    BIFROST_MAD_ACC_4(acc0, sum0, values, w1);

    src_addr2 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr2);
    BIFROST_MAD_ACC_4(acc0, sum0, values, w2);
    BIFROST_MAD_ACC_4(acc2, sum2, values, w0);

    src_addr2 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr2);
    BIFROST_MAD_ACC_4(acc2, sum2, values, w1);

    src_addr2 += src_stride_y;
    values = VLOAD(VEC_SIZE)(0, src_addr2);
    BIFROST_MAD_ACC_4(acc2, sum2, values, w2);

#if defined(HAS_BIAS)
    acc0 += bias_values;
    acc2 += bias_values;
#endif /* defined(HAS_BIAS) */

    // Asymmetric-quantization correction terms (same scheme as the NCHW kernel).
#if WEIGHTS_OFFSET != 0
    acc0 += WEIGHTS_OFFSET * sum0;
    acc2 += WEIGHTS_OFFSET * sum2;
#endif /* WEIGHTS_OFFSET != 0 */

#if INPUT_OFFSET != 0
    VEC_INT offs = INPUT_OFFSET * sum_we;

    acc0 += offs;
    acc2 += offs;
#endif /* INPUT_OFFSET != 0 */

#if K_OFFSET != 0
    acc0 += (VEC_INT)K_OFFSET;
    acc2 += (VEC_INT)K_OFFSET;
#endif /* K_OFFSET != 0 */

    // Requantize: fixed-point multiply/shift, output offset, saturate to uchar.
    acc0 = asymm_mult_by_quant_multiplier_less_than_one(acc0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT);
    acc2 = asymm_mult_by_quant_multiplier_less_than_one(acc2, OUTPUT_MULTIPLIER, OUTPUT_SHIFT);
    acc0 += (VEC_INT)OUTPUT_OFFSET;
    acc2 += (VEC_INT)OUTPUT_OFFSET;
    VEC_UCHAR res0 = CONVERT_SAT(acc0, VEC_UCHAR);
    VEC_UCHAR res2 = CONVERT_SAT(acc2, VEC_UCHAR);
    res0 = CLAMP(res0, (VEC_UCHAR)0, (VEC_UCHAR)255);
    res2 = CLAMP(res2, (VEC_UCHAR)0, (VEC_UCHAR)255);

    // Two consecutive output rows per work-item.
    VSTORE(VEC_SIZE)
    (res0, 0, dst.ptr + 0 * dst_stride_y);
    VSTORE(VEC_SIZE)
    (res2, 0, dst.ptr + 1 * dst_stride_y);
}
722
723#endif /* defined(VEC_SIZE) && defined(SRC_DEPTH) && defined(CONV_PAD_TOP) && defined(ROWS_READ) */
724
725#endif /* defined(WEIGHTS_OFFSET) && defined(INPUT_OFFSET) && defined(K_OFFSET) && defined(OUTPUT_OFFSET) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT) */