/*
 * Copyright (c) 2019-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "helpers.h"

#undef CONVERT_SAT

#if defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH) && defined(DATA_LAYOUT_NHWC) && defined(PAD_TOP)

#define PTR_TO_VALUE(PTR, DATA_TYPE) *((__global DATA_TYPE *)(PTR))

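// Accumulates a 1x9 convolution along the y dimension (the spatial width for the NHWC layout
// used here) with stride 1: 16 consecutive input elements and the 9 kernel taps are loaded
// element by element, and the 8 resulting outputs are added to the 8-wide accumulator 'acc'.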
#define CONVOLUTION1x9_STRIDE1_NHWC(acc, row_ptr, weights_ptr) \
    ({ \
        VEC_DATA_TYPE(DATA_TYPE, 8) \
        src0 = (VEC_DATA_TYPE(DATA_TYPE, 8))( \
            PTR_TO_VALUE(row_ptr + 0 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 1 * src_stride_y, DATA_TYPE), \
            PTR_TO_VALUE(row_ptr + 2 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 3 * src_stride_y, DATA_TYPE), \
            PTR_TO_VALUE(row_ptr + 4 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 5 * src_stride_y, DATA_TYPE), \
            PTR_TO_VALUE(row_ptr + 6 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 7 * src_stride_y, DATA_TYPE)); \
        VEC_DATA_TYPE(DATA_TYPE, 8) \
        src1 = (VEC_DATA_TYPE(DATA_TYPE, 8))( \
            PTR_TO_VALUE(row_ptr + 8 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 9 * src_stride_y, DATA_TYPE), \
            PTR_TO_VALUE(row_ptr + 10 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 11 * src_stride_y, DATA_TYPE), \
            PTR_TO_VALUE(row_ptr + 12 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 13 * src_stride_y, DATA_TYPE), \
            PTR_TO_VALUE(row_ptr + 14 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 15 * src_stride_y, DATA_TYPE)); \
        VEC_DATA_TYPE(DATA_TYPE, 8) \
        weights_values0 = (VEC_DATA_TYPE(DATA_TYPE, 8))( \
            PTR_TO_VALUE(weights_ptr + 0 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 1 * weights_stride_y, DATA_TYPE), \
            PTR_TO_VALUE(weights_ptr + 2 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 3 * weights_stride_y, DATA_TYPE), \
            PTR_TO_VALUE(weights_ptr + 4 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 5 * weights_stride_y, DATA_TYPE), \
            PTR_TO_VALUE(weights_ptr + 6 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 7 * weights_stride_y, DATA_TYPE)); \
        DATA_TYPE weights_value1 = PTR_TO_VALUE(weights_ptr + 8 * weights_stride_y, DATA_TYPE); \
        acc += src0 * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s0; \
        acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s1234, src0.s567, src1.s0) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s1; \
        acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s234, src0.s567, src1.s01) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s2; \
        acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s345, src0.s67, src1.s012) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s3; \
        acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s4567, src1.s0123) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s4; \
        acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s567, src1.s0123, src1.s4) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s5; \
        acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s67, src1.s012, src1.s345) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s6; \
        acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s7, src1.s0123, src1.s456) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s7; \
        acc += src1 * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_value1; \
    })

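// Stride-2 variant of the macro above: 24 consecutive input elements are needed to produce
// the same 8 outputs, so the swizzles below pick every second element.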
#define CONVOLUTION1x9_STRIDE2_NHWC(acc, row_ptr, weights_ptr) \
    ({ \
        VEC_DATA_TYPE(DATA_TYPE, 16) \
        src0 = (VEC_DATA_TYPE(DATA_TYPE, 16))( \
            PTR_TO_VALUE(row_ptr + 0 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 1 * src_stride_y, DATA_TYPE), \
            PTR_TO_VALUE(row_ptr + 2 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 3 * src_stride_y, DATA_TYPE), \
            PTR_TO_VALUE(row_ptr + 4 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 5 * src_stride_y, DATA_TYPE), \
            PTR_TO_VALUE(row_ptr + 6 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 7 * src_stride_y, DATA_TYPE), \
            PTR_TO_VALUE(row_ptr + 8 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 9 * src_stride_y, DATA_TYPE), \
            PTR_TO_VALUE(row_ptr + 10 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 11 * src_stride_y, DATA_TYPE), \
            PTR_TO_VALUE(row_ptr + 12 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 13 * src_stride_y, DATA_TYPE), \
            PTR_TO_VALUE(row_ptr + 14 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 15 * src_stride_y, DATA_TYPE)); \
        VEC_DATA_TYPE(DATA_TYPE, 8) \
        src1 = (VEC_DATA_TYPE(DATA_TYPE, 8))( \
            PTR_TO_VALUE(row_ptr + 16 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 17 * src_stride_y, DATA_TYPE), \
            PTR_TO_VALUE(row_ptr + 18 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 19 * src_stride_y, DATA_TYPE), \
            PTR_TO_VALUE(row_ptr + 20 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 21 * src_stride_y, DATA_TYPE), \
            PTR_TO_VALUE(row_ptr + 22 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 23 * src_stride_y, DATA_TYPE)); \
        VEC_DATA_TYPE(DATA_TYPE, 8) \
        weights_values0 = (VEC_DATA_TYPE(DATA_TYPE, 8))( \
            PTR_TO_VALUE(weights_ptr + 0 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 1 * weights_stride_y, DATA_TYPE), \
            PTR_TO_VALUE(weights_ptr + 2 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 3 * weights_stride_y, DATA_TYPE), \
            PTR_TO_VALUE(weights_ptr + 4 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 5 * weights_stride_y, DATA_TYPE), \
            PTR_TO_VALUE(weights_ptr + 6 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 7 * weights_stride_y, DATA_TYPE)); \
        DATA_TYPE weights_value1 = PTR_TO_VALUE(weights_ptr + 8 * weights_stride_y, DATA_TYPE); \
        acc += src0.s02468ACE * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s0; \
        acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s1357, src0.s9BDF) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s1; \
        acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s2468, src0.sACE, src1.s0) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s2; \
        acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s3579, src0.sBDF, src1.s1) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s3; \
        acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s468A, src0.sCE, src1.s02) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s4; \
        acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s579, src0.sBDF, src1.s13) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s5; \
        acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s68A, src0.sCE, src1.s024) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s6; \
        acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s79B, src0.sDF, src1.s135) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s7; \
        acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s8AC, src0.sE, src1.s0246) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_value1; \
    })

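// Vectorized path, enabled when VEC_SIZE is defined: VEC_SIZE input channels are processed per
// iteration with fused multiply-adds, keeping one per-channel partial-sum vector per output
// column. The partial sums are reduced to scalars with REDUCE() further down.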
#if defined(VEC_SIZE)
#define VFMA(acc, w, src0, src1, src2, src3, src4, src5, src6, src7) \
    ({ \
        acc##0 = fma(src0, w, acc##0); \
        acc##1 = fma(src1, w, acc##1); \
        acc##2 = fma(src2, w, acc##2); \
        acc##3 = fma(src3, w, acc##3); \
        acc##4 = fma(src4, w, acc##4); \
        acc##5 = fma(src5, w, acc##5); \
        acc##6 = fma(src6, w, acc##6); \
        acc##7 = fma(src7, w, acc##7); \
    })

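// Stride-1 1x9 convolution tuned for Bifrost GPUs: loads VEC_SIZE channels for 16 consecutive
// width positions plus the 9 kernel taps, then issues 9 VFMA steps over a sliding window of 8
// input vectors to update the 8 accumulators.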
#define CONVOLUTION1x9_STRIDE1_NHWC_BIFROST(acc, row_ptr, weights_ptr) \
    ({ \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        src0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)row_ptr); \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        src1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + src_stride_y)); \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        src2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 2 * src_stride_y)); \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        src3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 3 * src_stride_y)); \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        src4 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 4 * src_stride_y)); \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        src5 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 5 * src_stride_y)); \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        src6 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 6 * src_stride_y)); \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        src7 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 7 * src_stride_y)); \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        src8 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 8 * src_stride_y)); \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        src9 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 9 * src_stride_y)); \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        src10 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 10 * src_stride_y)); \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        src11 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 11 * src_stride_y)); \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        src12 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 12 * src_stride_y)); \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        src13 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 13 * src_stride_y)); \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        src14 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 14 * src_stride_y)); \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        src15 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 15 * src_stride_y)); \
        \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        w0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_ptr + 0 * weights_stride_y)); \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        w1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_ptr + 1 * weights_stride_y)); \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        w2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_ptr + 2 * weights_stride_y)); \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        w3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_ptr + 3 * weights_stride_y)); \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        w4 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_ptr + 4 * weights_stride_y)); \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        w5 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_ptr + 5 * weights_stride_y)); \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        w6 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_ptr + 6 * weights_stride_y)); \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        w7 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_ptr + 7 * weights_stride_y)); \
        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
        w8 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_ptr + 8 * weights_stride_y)); \
        \
        VFMA(acc, w0, src0, src1, src2, src3, src4, src5, src6, src7); \
        VFMA(acc, w1, src1, src2, src3, src4, src5, src6, src7, src8); \
        VFMA(acc, w2, src2, src3, src4, src5, src6, src7, src8, src9); \
        VFMA(acc, w3, src3, src4, src5, src6, src7, src8, src9, src10); \
        VFMA(acc, w4, src4, src5, src6, src7, src8, src9, src10, src11); \
        VFMA(acc, w5, src5, src6, src7, src8, src9, src10, src11, src12); \
        VFMA(acc, w6, src6, src7, src8, src9, src10, src11, src12, src13); \
        VFMA(acc, w7, src7, src8, src9, src10, src11, src12, src13, src14); \
        VFMA(acc, w8, src8, src9, src10, src11, src12, src13, src14, src15); \
    })

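// Horizontal reduction: sums the VEC_SIZE per-channel partial sums held in 'vec' into the
// scalar 'out'. Only VEC_SIZE == 4 is supported.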
#if VEC_SIZE == 4
#define REDUCE(out, vec) \
    ({ \
        VEC_DATA_TYPE(DATA_TYPE, 2) \
        tmp1 = vec.s01 + vec.s23; \
        out = tmp1.s0 + tmp1.s1; \
    })
#else // VEC_SIZE == 4
#error("Not supported")
#endif // VEC_SIZE == 4

#if STRIDE_X == 1
#define CONVOLUTION1x9_NHWC(acc, row_ptr, weights_ptr) CONVOLUTION1x9_STRIDE1_NHWC_BIFROST(acc, row_ptr, weights_ptr)
#else // STRIDE_X == 1
#error "Not supported"
#endif // STRIDE_X == 1

#else // defined(VEC_SIZE)

#if STRIDE_X == 1
#define CONVOLUTION1x9_NHWC(acc, row_ptr, weights_ptr) CONVOLUTION1x9_STRIDE1_NHWC(acc, row_ptr, weights_ptr)
#elif STRIDE_X == 2 // STRIDE_X == 1
#define CONVOLUTION1x9_NHWC(acc, row_ptr, weights_ptr) CONVOLUTION1x9_STRIDE2_NHWC(acc, row_ptr, weights_ptr)
#else // STRIDE_X == 1
#error "STRIDE_X larger than 2 is not supported"
#endif // STRIDE_X == 1

#endif // defined(VEC_SIZE)

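// Illustrative build options only (the real values are chosen by the host-side kernel
// configuration): this file is expected to be compiled with the macros checked by the guard
// above, e.g.
//   -DDATA_TYPE=float -DDATA_LAYOUT_NHWC=1 -DSTRIDE_X=1 -DSTRIDE_Y=1 -DPAD_TOP=0
//   -DWEIGHTS_DEPTH=64 -DSRC_HEIGHT=224 -DVEC_SIZE=4 -DHAS_BIAS
// VEC_SIZE and HAS_BIAS are optional; when VEC_SIZE is defined, only VEC_SIZE=4 with
// STRIDE_X=1 is supported.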
/** This kernel performs a direct convolution to convolve the low three dimensions in a tensor with the NHWC data layout
 *
 * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
 * @note The third dimension of the weights tensor must be passed at compile time using -DWEIGHTS_DEPTH
 * @note If biases are used then -DHAS_BIAS has to be passed at compile time
 *
 * @param[in]  src_ptr                               Pointer to the source tensor. Supported data types: F16/F32
 * @param[in]  src_stride_x                          Stride of the source tensor in X dimension (in bytes)
 * @param[in]  src_step_x                            src_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  src_stride_y                          Stride of the source tensor in Y dimension (in bytes)
 * @param[in]  src_step_y                            src_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in]  src_stride_z                          Stride of the source tensor in Z dimension (in bytes)
 * @param[in]  src_step_z                            src_stride_z * number of elements along Z processed per workitem(in bytes)
 * @param[in]  src_offset_first_element_in_bytes     The offset of the first element in the source tensor
 * @param[out] dst_ptr                               Pointer to the destination tensor. Supported data types: same as @p src_ptr
 * @param[in]  dst_stride_x                          Stride of the destination tensor in X dimension (in bytes)
 * @param[in]  dst_step_x                            dst_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  dst_stride_y                          Stride of the destination tensor in Y dimension (in bytes)
 * @param[in]  dst_step_y                            dst_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in]  dst_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
 * @param[in]  dst_step_z                            dst_stride_z * number of elements along Z processed per workitem(in bytes)
 * @param[in]  dst_offset_first_element_in_bytes     The offset of the first element in the destination tensor
 * @param[in]  weights_ptr                           Pointer to the weights tensor. Supported data types: same as @p src_ptr
 * @param[in]  weights_stride_x                      Stride of the weights tensor in X dimension (in bytes)
 * @param[in]  weights_step_x                        weights_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  weights_stride_y                      Stride of the weights tensor in Y dimension (in bytes)
 * @param[in]  weights_step_y                        weights_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in]  weights_stride_z                      Stride of the weights tensor in Z dimension (in bytes)
 * @param[in]  weights_step_z                        weights_stride_z * number of elements along Z processed per workitem(in bytes)
 * @param[in]  weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
 * @param[in]  biases_ptr                            (Optional) Pointer to the biases tensor. Supported data types: same as @p src_ptr
 * @param[in]  biases_stride_x                       (Optional) Stride of the biases tensor in X dimension (in bytes)
 * @param[in]  biases_step_x                         (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  biases_offset_first_element_in_bytes  (Optional) The offset of the first element in the biases tensor
 * @param[in]  weights_stride_w                      Stride of the weights tensor in the 4th dimension
 */
__kernel void direct_convolution9x9_nhwc(
    TENSOR3D_DECLARATION(src),
    TENSOR3D_DECLARATION(dst),
    TENSOR3D_DECLARATION(weights),
#ifdef HAS_BIAS
    VECTOR_DECLARATION(biases),
#endif /* defined(HAS_BIAS) */
    unsigned int weights_stride_w)
{
    Image    src     = CONVERT_TO_IMAGE_STRUCT(src);
    Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
    Tensor3D dst     = CONVERT_TO_TENSOR3D_STRUCT(dst);

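    // 'values' accumulates the 8 output elements computed along the output width by this
    // work-item. When VEC_SIZE is defined, values0..values7 hold per-channel partial sums
    // which are reduced into 'values' after the accumulation loop.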
    VEC_DATA_TYPE(DATA_TYPE, 8)
    values = 0;

#if defined(VEC_SIZE)
    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
    values0 = 0;
    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
    values1 = 0;
    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
    values2 = 0;
    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
    values3 = 0;
    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
    values4 = 0;
    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
    values5 = 0;
    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
    values6 = 0;
    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
    values7 = 0;
#define STEP_X (VEC_SIZE)
#else // defined(VEC_SIZE)
#define STEP_X (1)
#endif // defined(VEC_SIZE)

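    // id0 selects the output feature map (it also indexes the weights via weights_stride_w),
    // id2 selects the output row; the matching input row is (id2 * STRIDE_Y) - PAD_TOP.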
    const int id0 = get_global_id(0);
    const int id1 = get_global_id(1);
    const int id2 = get_global_id(2);

    __global uchar *weights_addr = (__global uchar *)tensor3D_offset(&weights, 0, 0, 0);
    __global uchar *src_addr     = (__global uchar *)offset(&src, 0, 0) - src_stride_x * id0 + ((id2 * STRIDE_Y) - PAD_TOP) * (int)src_stride_z;

    weights_addr += id0 * weights_stride_w;

    const int coordy = (id2 * STRIDE_Y) - PAD_TOP;
    if(coordy < 0)
    {
        // Skip first rows containing padding
        for(volatile int d = 0; d < WEIGHTS_DEPTH; d += STEP_X)
        {
            const int start_z = -coordy;
            for(int i = start_z; i < 9; ++i)
            {
                CONVOLUTION1x9_NHWC(values, (src_addr + i * (int)src_stride_z), (weights_addr + i * (int)weights_stride_z));
            }
            src_addr += STEP_X * sizeof(DATA_TYPE);
            weights_addr += STEP_X * sizeof(DATA_TYPE);
        }
    }
    else if(coordy > (SRC_HEIGHT - 9))
    {
        for(volatile int d = 0; d < WEIGHTS_DEPTH; d += STEP_X)
        {
            // Avoid loading rows beyond the input height
            const int end_z = SRC_HEIGHT - coordy;
            for(int i = 0; i < end_z; ++i)
            {
                CONVOLUTION1x9_NHWC(values, (src_addr + i * (int)src_stride_z), (weights_addr + i * (int)weights_stride_z));
            }
            src_addr += STEP_X * sizeof(DATA_TYPE);
            weights_addr += STEP_X * sizeof(DATA_TYPE);
        }
    }
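    // Interior rows: all 9 kernel rows fall inside the input, so the window is fully unrolled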
    else
    {
        for(volatile int d = 0; d < WEIGHTS_DEPTH; d += STEP_X)
        {
            CONVOLUTION1x9_NHWC(values, src_addr, weights_addr);
            CONVOLUTION1x9_NHWC(values, (src_addr + 1 * (int)src_stride_z), (weights_addr + 1 * (int)weights_stride_z));
            CONVOLUTION1x9_NHWC(values, (src_addr + 2 * (int)src_stride_z), (weights_addr + 2 * (int)weights_stride_z));
            CONVOLUTION1x9_NHWC(values, (src_addr + 3 * (int)src_stride_z), (weights_addr + 3 * (int)weights_stride_z));
            CONVOLUTION1x9_NHWC(values, (src_addr + 4 * (int)src_stride_z), (weights_addr + 4 * (int)weights_stride_z));
            CONVOLUTION1x9_NHWC(values, (src_addr + 5 * (int)src_stride_z), (weights_addr + 5 * (int)weights_stride_z));
            CONVOLUTION1x9_NHWC(values, (src_addr + 6 * (int)src_stride_z), (weights_addr + 6 * (int)weights_stride_z));
            CONVOLUTION1x9_NHWC(values, (src_addr + 7 * (int)src_stride_z), (weights_addr + 7 * (int)weights_stride_z));
            CONVOLUTION1x9_NHWC(values, (src_addr + 8 * (int)src_stride_z), (weights_addr + 8 * (int)weights_stride_z));
            src_addr += STEP_X * sizeof(DATA_TYPE);
            weights_addr += STEP_X * sizeof(DATA_TYPE);
        }
    }

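    // In the vectorized path, collapse each per-channel partial-sum vector into its scalar output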
#if defined(VEC_SIZE)
    REDUCE(values.s0, values0);
    REDUCE(values.s1, values1);
    REDUCE(values.s2, values2);
    REDUCE(values.s3, values3);
    REDUCE(values.s4, values4);
    REDUCE(values.s5, values5);
    REDUCE(values.s6, values6);
    REDUCE(values.s7, values7);
#endif // defined(VEC_SIZE)

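    // Add the bias of this output feature map; the scalar loaded below is broadcast to all
    // 8 outputs by the vector cast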
#if defined(HAS_BIAS)
    Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
    values += (VEC_DATA_TYPE(DATA_TYPE, 8)) *((__global DATA_TYPE *)(vector_offset(&biases, id0)));
#endif // defined(HAS_BIAS)

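    // Store the 8 computed outputs at consecutive y (width) positions of the destination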
    *((__global DATA_TYPE *)(dst.ptr + 0 * dst_stride_y)) = values.s0;
    *((__global DATA_TYPE *)(dst.ptr + 1 * dst_stride_y)) = values.s1;
    *((__global DATA_TYPE *)(dst.ptr + 2 * dst_stride_y)) = values.s2;
    *((__global DATA_TYPE *)(dst.ptr + 3 * dst_stride_y)) = values.s3;
    *((__global DATA_TYPE *)(dst.ptr + 4 * dst_stride_y)) = values.s4;
    *((__global DATA_TYPE *)(dst.ptr + 5 * dst_stride_y)) = values.s5;
    *((__global DATA_TYPE *)(dst.ptr + 6 * dst_stride_y)) = values.s6;
    *((__global DATA_TYPE *)(dst.ptr + 7 * dst_stride_y)) = values.s7;
#undef STEP_X
}
#endif // defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH) && defined(DATA_LAYOUT_NHWC) && defined(PAD_TOP)