/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "helpers.h"

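// Element-wise helper macros used by the kernels below. For the floating-point
// data types handled here, SQCVT_SAT reduces to an identity conversion.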
#define ADD_OP(a, b) ((a) + (b))
#define SUB_OP(a, b) ((a) - (b))
#define MUL_OP(a, b) ((a) * (b))
#define INVSQRT_OP(a) rsqrt((a))
#define SQCVT_SAT(a) (a)

#if defined(VEC_SIZE) && defined(DATA_TYPE)

#if defined(FUSED_ACTIVATION)
#include "activation_layer.cl"
#define ACTIVATION_FUNC(x) ACTIVATION_OP(FUSED_ACTIVATION, x)
#else /* defined(FUSED_ACTIVATION) */
#define ACTIVATION_FUNC(x) (x)
#endif /* defined(FUSED_ACTIVATION) */

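// Note: the batch normalization kernels below are specialised at build time through
// preprocessor flags. An illustrative host-side build might pass, for example,
// "-DDATA_TYPE=float -DVEC_SIZE=4", plus the optional -DFUSED_ACTIVATION=...,
// -DIN_PLACE, -DUSE_DEFAULT_BETA and -DUSE_DEFAULT_GAMMA flags; the exact option
// set is assembled by the host code.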
/** Apply batch normalization.
 *
 * @param[in] input_ptr Pointer to the first source tensor. Supported data types: F16/F32
 * @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes)
 * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] input_stride_y Stride of the first source tensor in Y dimension (in bytes)
 * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in] input_stride_z Stride of the first source tensor in Z dimension (in bytes)
 * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
 * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
 * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
 * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
 * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
 * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
 * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
 * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
 * @param[in] mean_ptr Pointer to the mean source tensor. Supported data types: same as @p input_ptr
 * @param[in] mean_stride_x Stride of the mean source tensor in X dimension (in bytes)
 * @param[in] mean_step_x mean_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] mean_offset_first_element_in_bytes The offset of the first element in the mean source tensor
 * @param[in] var_ptr Pointer to the var tensor. Supported data types: same as @p input_ptr
 * @param[in] var_stride_x Stride of the var tensor in X dimension (in bytes)
 * @param[in] var_step_x var_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] var_offset_first_element_in_bytes The offset of the first element in the var source tensor
 * @param[in] beta_ptr Pointer to the beta source tensor. Supported data types: same as @p input_ptr
 * @param[in] beta_stride_x Stride of the beta source tensor in X dimension (in bytes)
 * @param[in] beta_step_x beta_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] beta_offset_first_element_in_bytes The offset of the first element in the beta source tensor
 * @param[in] gamma_ptr Pointer to the gamma source tensor. Supported data types: same as @p input_ptr
 * @param[in] gamma_stride_x Stride of the gamma source tensor in X dimension (in bytes)
 * @param[in] gamma_step_x gamma_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] gamma_offset_first_element_in_bytes The offset of the first element in the gamma source tensor
 * @param[in] epsilon Epsilon parameter in the batch normalization equation
 */
__kernel void batchnormalization_layer_nchw(TENSOR3D_DECLARATION(input),
#ifndef IN_PLACE
                                            TENSOR3D_DECLARATION(output),
#endif /* not IN_PLACE */
                                            VECTOR_DECLARATION(mean),
                                            VECTOR_DECLARATION(var),
#ifndef USE_DEFAULT_BETA
                                            VECTOR_DECLARATION(beta),
#endif /* USE_DEFAULT_BETA */
#ifndef USE_DEFAULT_GAMMA
                                            VECTOR_DECLARATION(gamma),
#endif /* USE_DEFAULT_GAMMA */
                                            float epsilon)
{
    Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
#ifdef IN_PLACE
    Tensor3D out = in;
#else /* IN_PLACE */
    Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
#endif /* IN_PLACE */
    Vector mean = CONVERT_TO_VECTOR_STRUCT(mean);
    Vector var  = CONVERT_TO_VECTOR_STRUCT(var);
#ifndef USE_DEFAULT_BETA
    Vector beta = CONVERT_TO_VECTOR_STRUCT(beta);
#endif /* USE_DEFAULT_BETA */
#ifndef USE_DEFAULT_GAMMA
    Vector gamma = CONVERT_TO_VECTOR_STRUCT(gamma);
#endif /* USE_DEFAULT_GAMMA */

    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
    data = 0;
    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
    denominator = 0;
    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
    numerator = 0;
    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
    x_bar = 0;
    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
    res = 0;

    const int current_slice = get_global_id(2);

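    // Per-element computation performed below (a sketch using this kernel's own names):
    //   x_bar = (data - mean) * rsqrt(var + epsilon)
    //   res   = gamma * x_bar + beta   (the gamma/beta terms are skipped when the defaults are used)
    //   out   = ACTIVATION_FUNC(res)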
    data        = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)in.ptr);
    denominator = *((__global DATA_TYPE *)(var.ptr + current_slice * var.stride_x));
    denominator = INVSQRT_OP(ADD_OP(denominator, ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))SQCVT_SAT(epsilon))));

    // Calculate x bar and store results
    numerator = *((__global DATA_TYPE *)(mean.ptr + current_slice * mean.stride_x));
    numerator = SUB_OP(data, numerator);
    x_bar     = MUL_OP(numerator, denominator);

#ifndef USE_DEFAULT_GAMMA
    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
    gamma_vec = *((__global DATA_TYPE *)(gamma.ptr + current_slice * gamma.stride_x));

    res = MUL_OP(gamma_vec, x_bar);
#else /* USE_DEFAULT_GAMMA */
    // gamma is equal to 1, no need to perform multiplications
    res = x_bar;
#endif /* USE_DEFAULT_GAMMA */

#ifndef USE_DEFAULT_BETA
    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
    beta_vec = *((__global DATA_TYPE *)(beta.ptr + current_slice * beta.stride_x));
    // beta is not zero, hence we need to perform the addition
    res = ADD_OP(res, beta_vec);
#endif /* USE_DEFAULT_BETA */

    res = ACTIVATION_FUNC(res);

    VSTORE(VEC_SIZE)
    (res, 0, (__global DATA_TYPE *)out.ptr);
}

/** Apply batch normalization on tensors with NHWC format.
 *
 * @param[in] input_ptr Pointer to the first source tensor. Supported data types: F16/F32
 * @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes)
 * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] input_stride_y Stride of the first source tensor in Y dimension (in bytes)
 * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in] input_stride_z Stride of the first source tensor in Z dimension (in bytes)
 * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
 * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
 * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
 * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
 * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
 * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
 * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
 * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
 * @param[in] mean_ptr Pointer to the mean source tensor. Supported data types: same as @p input_ptr
 * @param[in] mean_stride_x Stride of the mean source tensor in X dimension (in bytes)
 * @param[in] mean_step_x mean_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] mean_offset_first_element_in_bytes The offset of the first element in the mean source tensor
 * @param[in] var_ptr Pointer to the var tensor. Supported data types: same as @p input_ptr
 * @param[in] var_stride_x Stride of the var tensor in X dimension (in bytes)
 * @param[in] var_step_x var_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] var_offset_first_element_in_bytes The offset of the first element in the var source tensor
 * @param[in] beta_ptr Pointer to the beta source tensor. Supported data types: same as @p input_ptr
 * @param[in] beta_stride_x Stride of the beta source tensor in X dimension (in bytes)
 * @param[in] beta_step_x beta_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] beta_offset_first_element_in_bytes The offset of the first element in the beta source tensor
 * @param[in] gamma_ptr Pointer to the gamma source tensor. Supported data types: same as @p input_ptr
 * @param[in] gamma_stride_x Stride of the gamma source tensor in X dimension (in bytes)
 * @param[in] gamma_step_x gamma_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] gamma_offset_first_element_in_bytes The offset of the first element in the gamma source tensor
 * @param[in] epsilon Epsilon parameter in the batch normalization equation
 */
__kernel void batchnormalization_layer_nhwc(TENSOR3D_DECLARATION(input),
#ifndef IN_PLACE
                                            TENSOR3D_DECLARATION(output),
#endif /* not IN_PLACE */
                                            VECTOR_DECLARATION(mean),
                                            VECTOR_DECLARATION(var),
#ifndef USE_DEFAULT_BETA
                                            VECTOR_DECLARATION(beta),
#endif /* USE_DEFAULT_BETA */
#ifndef USE_DEFAULT_GAMMA
                                            VECTOR_DECLARATION(gamma),
#endif /* USE_DEFAULT_GAMMA */
                                            float epsilon)
{
    Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
#ifdef IN_PLACE
    Tensor3D out = in;
#else /* IN_PLACE */
    Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
#endif /* IN_PLACE */
    Vector mean = CONVERT_TO_VECTOR_STRUCT(mean);
    Vector var  = CONVERT_TO_VECTOR_STRUCT(var);
#ifndef USE_DEFAULT_BETA
    Vector beta = CONVERT_TO_VECTOR_STRUCT(beta);
#endif /* USE_DEFAULT_BETA */
#ifndef USE_DEFAULT_GAMMA
    Vector gamma = CONVERT_TO_VECTOR_STRUCT(gamma);
#endif /* USE_DEFAULT_GAMMA */

    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
    data = 0;
    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
    denominator = 0;
    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
    numerator = 0;
    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
    x_bar = 0;
    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
    res = 0;

    const int current_slice = get_global_id(0);

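    // Same per-element math as the NCHW kernel, but in NHWC the channel dimension runs
    // along X, so mean/var/beta/gamma are loaded as vectors of VEC_SIZE consecutive
    // channels instead of being broadcast from a single per-slice scalar.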
    data        = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)in.ptr);
    denominator = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(var.ptr + current_slice * VEC_SIZE * var.stride_x));
    denominator = INVSQRT_OP(ADD_OP(denominator, ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))SQCVT_SAT(epsilon))));

    // Calculate x bar and store results
    numerator = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(mean.ptr + current_slice * VEC_SIZE * mean.stride_x));
    numerator = SUB_OP(data, numerator);
    x_bar     = MUL_OP(numerator, denominator);

#ifndef USE_DEFAULT_GAMMA
    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
    gamma_vec = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(gamma.ptr + current_slice * VEC_SIZE * gamma.stride_x));

    res = MUL_OP(gamma_vec, x_bar);
#else /* USE_DEFAULT_GAMMA */
    // gamma is equal to 1, no need to perform multiplications
    res = x_bar;
#endif /* USE_DEFAULT_GAMMA */

#ifndef USE_DEFAULT_BETA
    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
    beta_vec = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(beta.ptr + current_slice * VEC_SIZE * beta.stride_x));
    // beta is not zero, hence we need to perform the addition
    res = ADD_OP(res, beta_vec);
#endif /* USE_DEFAULT_BETA */

    res = ACTIVATION_FUNC(res);

    VSTORE(VEC_SIZE)
    (res, 0, (__global DATA_TYPE *)out.ptr);
}
#endif /* defined(VEC_SIZE) && defined(DATA_TYPE) */

#if defined(NUM_CHANNELS) && defined(DATA_TYPE) && defined(EPSILON)
/** Fuse batchnorm parameters into convolution layer parameters
 *
 * @attention Data type should be passed using the -DDATA_TYPE compile flag, e.g. -DDATA_TYPE=float
 * @attention Input tensor depth should be given as a preprocessor argument using -DNUM_CHANNELS=size, e.g. -DNUM_CHANNELS=16
 * @attention Batch normalization epsilon parameter should be given as a preprocessor argument with -DEPSILON=value, e.g. -DEPSILON=0.001f
 *
 * @param[in] conv_w_ptr Pointer to the source weights tensor. Supported data types: F16/F32
 * @param[in] conv_w_stride_x Stride of the source tensor in X dimension (in bytes)
 * @param[in] conv_w_step_x conv_w_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] conv_w_stride_y Stride of the source tensor in Y dimension (in bytes)
 * @param[in] conv_w_step_y conv_w_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in] conv_w_stride_z Stride of the source tensor in Z dimension (in bytes)
 * @param[in] conv_w_step_z conv_w_stride_z * number of elements along Z processed per workitem(in bytes)
 * @param[in] conv_w_stride_w Stride of the source tensor in W dimension (in bytes)
 * @param[in] conv_w_step_w conv_w_stride_w * number of elements along W processed per workitem(in bytes)
 * @param[in] conv_w_offset_first_element_in_bytes The offset of the first element in the source tensor
 * @param[in] bn_mean_ptr Pointer to the mean source tensor. Supported data types: same as @p conv_w_ptr
 * @param[in] bn_mean_stride_x Stride of the mean source tensor in X dimension (in bytes)
 * @param[in] bn_mean_step_x bn_mean_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] bn_mean_offset_first_element_in_bytes The offset of the first element in the mean source tensor
 * @param[in] bn_var_ptr Pointer to the var tensor. Supported data types: same as @p conv_w_ptr
 * @param[in] bn_var_stride_x Stride of the var tensor in X dimension (in bytes)
 * @param[in] bn_var_step_x bn_var_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] bn_var_offset_first_element_in_bytes The offset of the first element in the var source tensor
 * @param[out] fused_w_ptr Pointer to the destination weights tensor. Supported data types: same as @p conv_w_ptr
 * @param[in] fused_w_stride_x Stride of the destination tensor in X dimension (in bytes)
 * @param[in] fused_w_step_x fused_w_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] fused_w_stride_y Stride of the destination tensor in Y dimension (in bytes)
 * @param[in] fused_w_step_y fused_w_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in] fused_w_stride_z Stride of the destination tensor in Z dimension (in bytes)
 * @param[in] fused_w_step_z fused_w_stride_z * number of elements along Z processed per workitem(in bytes)
 * @param[in] fused_w_stride_w Stride of the destination tensor in W dimension (in bytes)
 * @param[in] fused_w_step_w fused_w_stride_w * number of elements along W processed per workitem(in bytes)
 * @param[in] fused_w_offset_first_element_in_bytes The offset of the first element in the destination tensor
 * @param[out] fused_b_ptr Pointer to the destination bias tensor. Supported data types: same as @p conv_w_ptr
 * @param[in] fused_b_stride_x Stride of the destination bias tensor in X dimension (in bytes)
 * @param[in] fused_b_step_x fused_b_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] fused_b_offset_first_element_in_bytes The offset of the first element in the destination bias tensor
 * @param[in] conv_b_ptr Pointer to the source bias tensor. Supported data types: same as @p conv_w_ptr
 * @param[in] conv_b_stride_x Stride of the source bias tensor in X dimension (in bytes)
 * @param[in] conv_b_step_x conv_b_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] conv_b_offset_first_element_in_bytes The offset of the first element in the source bias tensor
 * @param[in] bn_beta_ptr Pointer to the beta source tensor. Supported data types: same as @p conv_w_ptr
 * @param[in] bn_beta_stride_x Stride of the beta source tensor in X dimension (in bytes)
 * @param[in] bn_beta_step_x bn_beta_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] bn_beta_offset_first_element_in_bytes The offset of the first element in the beta source tensor
 * @param[in] bn_gamma_ptr Pointer to the gamma source tensor. Supported data types: same as @p conv_w_ptr
 * @param[in] bn_gamma_stride_x Stride of the gamma source tensor in X dimension (in bytes)
 * @param[in] bn_gamma_step_x bn_gamma_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] bn_gamma_offset_first_element_in_bytes The offset of the first element in the gamma source tensor
 */
__kernel void fuse_batchnormalization_layer(TENSOR4D_DECLARATION(conv_w),
                                            VECTOR_DECLARATION(bn_mean),
                                            VECTOR_DECLARATION(bn_var)
#ifndef IN_PLACE_W
                                            ,
                                            TENSOR4D_DECLARATION(fused_w)
#endif /* not IN_PLACE_W */
#ifndef IN_PLACE_B
                                            ,
                                            VECTOR_DECLARATION(fused_b)
#endif /* not IN_PLACE_B */
#ifdef HAS_BIAS
                                            ,
                                            VECTOR_DECLARATION(conv_b)
#endif /* HAS_BIAS */
#ifndef USE_DEFAULT_BETA
                                            ,
                                            VECTOR_DECLARATION(bn_beta)
#endif /* USE_DEFAULT_BETA */
#ifndef USE_DEFAULT_GAMMA
                                            ,
                                            VECTOR_DECLARATION(bn_gamma)
#endif /* USE_DEFAULT_GAMMA */
                                            )
{
    Tensor4D conv_w  = CONVERT_TO_TENSOR4D_STRUCT(conv_w, NUM_CHANNELS);
    Vector   bn_mean = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bn_mean);
    Vector   bn_var  = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bn_var);

    // In-place ops
#ifdef IN_PLACE_W
    Tensor4D fused_w = conv_w;
#else /* IN_PLACE_W */
    Tensor4D fused_w = CONVERT_TO_TENSOR4D_STRUCT(fused_w, NUM_CHANNELS);
#endif /* IN_PLACE_W */
#ifdef IN_PLACE_B
    Vector fused_b = conv_b;
#else /* IN_PLACE_B */
    Vector fused_b = CONVERT_TO_VECTOR_STRUCT_NO_STEP(fused_b);
#endif /* IN_PLACE_B */

    // Conditional ops
#ifdef HAS_BIAS
    Vector conv_b = CONVERT_TO_VECTOR_STRUCT_NO_STEP(conv_b);
#endif /* HAS_BIAS */
#ifndef USE_DEFAULT_BETA
    Vector bn_beta = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bn_beta);
#endif /* USE_DEFAULT_BETA */
#ifndef USE_DEFAULT_GAMMA
    Vector bn_gamma = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bn_gamma);
#endif /* USE_DEFAULT_GAMMA */

    const int current_slice = get_global_id(2) / NUM_CHANNELS;

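    // Fusion algebra implemented below (a sketch, using this kernel's own names):
    //   rvar    = 1 / sqrt(var + EPSILON)
    //   w_fused = gamma * w * rvar
    //   b_fused = gamma * (b - mean) * rvar + beta
    // so that a convolution with the fused weights and bias is equivalent to the
    // original convolution followed by batch normalization.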
#if defined(VEC_SIZE) && defined(LAST_ACCESSED_X)
    // Check if access on width gets out of bounds
    // If it does shift access vector to access elements within bounds
    const int xi = (int)(get_global_id(0) * VEC_SIZE);
    conv_w.ptr -= max(xi - (int)LAST_ACCESSED_X, 0) * conv_w_stride_x;
    fused_w.ptr -= max(xi - (int)LAST_ACCESSED_X, 0) * fused_w_stride_x;

    // Load W
    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
    wn = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)conv_w.ptr);
#else  // !defined(VEC_SIZE) || !defined(LAST_ACCESSED_X)
    DATA_TYPE wn = *((__global DATA_TYPE *)(conv_w.ptr));
#endif // defined(VEC_SIZE) && defined(LAST_ACCESSED_X)

    // rvar = 1 / sqrt(var + epsilon)
    const DATA_TYPE var  = *((__global DATA_TYPE *)(bn_var.ptr + current_slice * bn_var.stride_x));
    const DATA_TYPE rvar = INVSQRT_OP(ADD_OP(var, SQCVT_SAT((float)EPSILON)));
    wn *= rvar;

    // Load b
    const DATA_TYPE mean = *((__global DATA_TYPE *)(bn_mean.ptr + current_slice * bn_mean.stride_x));
    DATA_TYPE       bn   = 0;
#ifdef HAS_BIAS
    bn = *((__global DATA_TYPE *)(conv_b.ptr + current_slice * conv_b.stride_x));
#endif /* HAS_BIAS */
    bn = (bn - mean) * rvar;

#ifndef USE_DEFAULT_GAMMA
    const DATA_TYPE gamma_scalar = *((__global DATA_TYPE *)(bn_gamma.ptr + current_slice * bn_gamma.stride_x));
    wn *= gamma_scalar;
    bn *= gamma_scalar;
#endif /* USE_DEFAULT_GAMMA */

#ifndef USE_DEFAULT_BETA
    const DATA_TYPE beta_scalar = *((__global DATA_TYPE *)(bn_beta.ptr + current_slice * bn_beta.stride_x));
    bn += beta_scalar;
#endif /* USE_DEFAULT_BETA */

#if defined(VEC_SIZE) && defined(LAST_ACCESSED_X)
    // Store updated weights
    VSTORE(VEC_SIZE)
    (wn, 0, (__global DATA_TYPE *)fused_w.ptr);
#else  // !defined(VEC_SIZE) || !defined(LAST_ACCESSED_X)
    *((__global DATA_TYPE *)(fused_w.ptr)) = wn;
#endif // defined(VEC_SIZE) && defined(LAST_ACCESSED_X)

    // Store updated bias
    *((__global DATA_TYPE *)(fused_b.ptr + current_slice * fused_b.stride_x)) = bn;
}
#endif /* defined(NUM_CHANNELS) && defined(DATA_TYPE) && defined(EPSILON) */