/*
 * Copyright (c) 2019 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "arm.hpp"
#include "kernel.hpp"

namespace winograd
{

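// Weight transform for Winograd F(2x2, 5x5): each 5x5 kernel is mapped to a
// 6x6 tile of transformed weights, V = G w G^T, where G is the 6x5 transform
// matrix implied by the arithmetic below:
//
//       [  1/4      0      0      0      0 ]
//       [ -1/6   -1/6   -1/6   -1/6   -1/6 ]
//   G = [ -1/6    1/6   -1/6    1/6   -1/6 ]
//       [ 1/24   1/12    1/6    1/3    2/3 ]
//       [ 1/24  -1/12    1/6   -1/3    2/3 ]
//       [    0      0      0      0      1 ]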
template <>
void WeightTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>::execute(
  const int n_output_channels,
  const int n_input_channels,
  const float* const input,
  float* const output,
  const int matrix_stride,
  const int matrix_row_stride
)
{
  // Get pointers to each cell of the weight tensor
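  // (the weights are read in [kernel_row][kernel_col][input_channel][output_channel]
  // order, with output channels contiguous, as the strides below assume)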
  const auto weight_col_stride = n_input_channels * n_output_channels;
  const auto weight_row_stride = 5 * weight_col_stride;
  const float *inptrs[5][5];
  for (int i = 0; i < 5; i++)
  {
    for (int j = 0; j < 5; j++)
    {
      inptrs[i][j] = input + i*weight_row_stride + j*weight_col_stride;
    }
  }

  // For each input channel
  for (int ic = 0; ic < n_input_channels; ic++)
  {
    float *outptr = output + ic * matrix_row_stride;

    // For each output channel
    int channels_remaining = n_output_channels;
#ifdef __aarch64__
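    // Four output channels per iteration using 128-bit NEON vectors (AArch64 only)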
    for (; channels_remaining >= 4; channels_remaining -= 4)
    {
      // Matrices used and computed in this kernel
      float32x4_t w[5][5], Ww[6][5], V[6][6];

      // Read weights
      for (int i = 0; i < 5; i++)
      {
        for (int j = 0; j < 5; j++)
        {
          w[i][j] = vld1q_f32(inptrs[i][j]);
          inptrs[i][j] += 4;
        }
      }

      // Compute the matrix W w
      for (int j = 0; j < 5; j++)
      {
        // Ww[0][j] = w[0][j]/4.0f;
        Ww[0][j] = vmulq_n_f32(w[0][j], 1.0f/4.0f);

        // Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f;
        Ww[1][j] = vmulq_n_f32(
          vaddq_f32(
            vaddq_f32(
              vaddq_f32(w[1][j], w[0][j]),
              vaddq_f32(w[3][j], w[2][j])
            ),
            w[4][j]
          ),
          -1.0f/6.0f
        );

        // Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f;
        // Ww[2][j] = ((w[1][j] - w[0][j]) + (w[3][j] - w[2][j]) - w[4][j])/6.0f;
        Ww[2][j] = vmulq_n_f32(
          vsubq_f32(
            vaddq_f32(
              vsubq_f32(w[1][j], w[0][j]),
              vsubq_f32(w[3][j], w[2][j])
            ),
            w[4][j]
          ),
          1.0f/6.0f
        );

        // Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f;
        Ww[3][j] = vmulq_n_f32(
          vmlaq_n_f32(
            vaddq_f32(
              vaddq_f32(vmulq_n_f32(w[0][j], 1.0f/8.0f), vmulq_n_f32(w[1][j], 1.0f/4.0f)),
              vaddq_f32(vmulq_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
            ),
            w[4][j], 2.0f
          ),
          1.0f/3.0f
        );

        // Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f;
        Ww[4][j] = vmulq_n_f32(
          vmlaq_n_f32(
            vaddq_f32(
              vsubq_f32(vmulq_n_f32(w[0][j], 1.0f/8.0f), vmulq_n_f32(w[1][j], 1.0f/4.0f)),
              vsubq_f32(vmulq_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
            ),
            w[4][j], 2.0f
          ),
          1.0f/3.0f
        );

        // Ww[5][j] = w[4][j];
        Ww[5][j] = w[4][j];
      }

      // Compute V = W w WT
      for (int i = 0; i < 6; i++)
      {
        // V[i][0] = Ww[i][0]/4.0f;
        V[i][0] = vmulq_n_f32(Ww[i][0], 1.0f/4.0f);

        // V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f;
        V[i][1] = vmulq_n_f32(
          vaddq_f32(
            vaddq_f32(
              vaddq_f32(Ww[i][1], Ww[i][0]),
              vaddq_f32(Ww[i][3], Ww[i][2])
            ),
            Ww[i][4]
          ),
          -1.0f/6.0f
        );

        // V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f;
        // V[i][2] = ((Ww[i][1] - Ww[i][0]) + (Ww[i][3] - Ww[i][2]) - Ww[i][4])/6.0f;
        V[i][2] = vmulq_n_f32(
          vsubq_f32(
            vaddq_f32(
              vsubq_f32(Ww[i][1], Ww[i][0]),
              vsubq_f32(Ww[i][3], Ww[i][2])
            ),
            Ww[i][4]
          ),
          1.0f/6.0f
        );

        // V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f;
        V[i][3] = vmulq_n_f32(
          vmlaq_n_f32(
            vaddq_f32(
              vaddq_f32(vmulq_n_f32(Ww[i][0], 1.0f/8.0f), vmulq_n_f32(Ww[i][1], 1.0f/4.0f)),
              vaddq_f32(vmulq_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
            ),
            Ww[i][4], 2.0f
          ),
          1.0f/3.0f
        );

        // V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f;
        V[i][4] = vmulq_n_f32(
          vmlaq_n_f32(
            vaddq_f32(
              vsubq_f32(vmulq_n_f32(Ww[i][0], 1.0f/8.0f), vmulq_n_f32(Ww[i][1], 1.0f/4.0f)),
              vsubq_f32(vmulq_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
            ),
            Ww[i][4], 2.0f
          ),
          1.0f/3.0f
        );

        // V[i][5] = Ww[i][4];
        V[i][5] = Ww[i][4];
      }

      // Store the transformed weights
      for (int i = 0, m = 0; i < 6; i++)
      {
        for (int j = 0; j < 6; j++, m++)
        {
          vst1q_f32(outptr + m*matrix_stride, V[i][j]);
        }
      }
      outptr += 4;
    }
#endif // __aarch64__
#ifdef __arm_any__
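    // Two output channels per iteration using 64-bit NEON vectors; __arm_any__
    // is expected to be defined by arm.hpp whenever NEON is available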
    for (; channels_remaining >= 2; channels_remaining -= 2)
    {
      // Matrices used and computed in this kernel
      float32x2_t w[5][5], Ww[6][5], V[6][6];

      // Read weights
      for (int i = 0; i < 5; i++)
      {
        for (int j = 0; j < 5; j++)
        {
          w[i][j] = vld1_f32(inptrs[i][j]);
          inptrs[i][j] += 2;
        }
      }

      // Compute the matrix W w
      for (int j = 0; j < 5; j++)
      {
        // Ww[0][j] = w[0][j]/4.0f;
        Ww[0][j] = vmul_n_f32(w[0][j], 1.0f/4.0f);

        // Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f;
        Ww[1][j] = vmul_n_f32(
          vadd_f32(
            vadd_f32(
              vadd_f32(w[1][j], w[0][j]),
              vadd_f32(w[3][j], w[2][j])
            ),
            w[4][j]
          ),
          -1.0f/6.0f
        );

        // Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f;
        // Ww[2][j] = ((w[1][j] - w[0][j]) + (w[3][j] - w[2][j]) - w[4][j])/6.0f;
        Ww[2][j] = vmul_n_f32(
          vsub_f32(
            vadd_f32(
              vsub_f32(w[1][j], w[0][j]),
              vsub_f32(w[3][j], w[2][j])
            ),
            w[4][j]
          ),
          1.0f/6.0f
        );

        // Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f;
        Ww[3][j] = vmul_n_f32(
          vmla_n_f32(
            vadd_f32(
              vadd_f32(vmul_n_f32(w[0][j], 1.0f/8.0f), vmul_n_f32(w[1][j], 1.0f/4.0f)),
              vadd_f32(vmul_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
            ),
            w[4][j], 2.0f
          ),
          1.0f/3.0f
        );

        // Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f;
        Ww[4][j] = vmul_n_f32(
          vmla_n_f32(
            vadd_f32(
              vsub_f32(vmul_n_f32(w[0][j], 1.0f/8.0f), vmul_n_f32(w[1][j], 1.0f/4.0f)),
              vsub_f32(vmul_n_f32(w[2][j], 1.0f/2.0f), w[3][j])
            ),
            w[4][j], 2.0f
          ),
          1.0f/3.0f
        );

        // Ww[5][j] = w[4][j];
        Ww[5][j] = w[4][j];
      }

      // Compute V = W w WT
      for (int i = 0; i < 6; i++)
      {
        // V[i][0] = Ww[i][0]/4.0f;
        V[i][0] = vmul_n_f32(Ww[i][0], 1.0f/4.0f);

        // V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f;
        V[i][1] = vmul_n_f32(
          vadd_f32(
            vadd_f32(
              vadd_f32(Ww[i][1], Ww[i][0]),
              vadd_f32(Ww[i][3], Ww[i][2])
            ),
            Ww[i][4]
          ),
          -1.0f/6.0f
        );

        // V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f;
        // V[i][2] = ((Ww[i][1] - Ww[i][0]) + (Ww[i][3] - Ww[i][2]) - Ww[i][4])/6.0f;
        V[i][2] = vmul_n_f32(
          vsub_f32(
            vadd_f32(
              vsub_f32(Ww[i][1], Ww[i][0]),
              vsub_f32(Ww[i][3], Ww[i][2])
            ),
            Ww[i][4]
          ),
          1.0f/6.0f
        );

        // V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f;
        V[i][3] = vmul_n_f32(
          vmla_n_f32(
            vadd_f32(
              vadd_f32(vmul_n_f32(Ww[i][0], 1.0f/8.0f), vmul_n_f32(Ww[i][1], 1.0f/4.0f)),
              vadd_f32(vmul_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
            ),
            Ww[i][4], 2.0f
          ),
          1.0f/3.0f
        );

        // V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f;
        V[i][4] = vmul_n_f32(
          vmla_n_f32(
            vadd_f32(
              vsub_f32(vmul_n_f32(Ww[i][0], 1.0f/8.0f), vmul_n_f32(Ww[i][1], 1.0f/4.0f)),
              vsub_f32(vmul_n_f32(Ww[i][2], 1.0f/2.0f), Ww[i][3])
            ),
            Ww[i][4], 2.0f
          ),
          1.0f/3.0f
        );

        // V[i][5] = Ww[i][4];
        V[i][5] = Ww[i][4];
      }

      // Store the transformed weights
      for (int i = 0, m = 0; i < 6; i++)
      {
        for (int j = 0; j < 6; j++, m++)
        {
          vst1_f32(outptr + m*matrix_stride, V[i][j]);
        }
      }
      outptr += 2;
    }
#endif // __arm_any__
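    // Scalar tail: process any remaining output channels one at a time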
    for (; channels_remaining; channels_remaining--)
    {
      // Matrices used and computed in this kernel
      float w[5][5], Ww[6][5], V[6][6];

      // Read weights
      for (int i = 0; i < 5; i++)
      {
        for (int j = 0; j < 5; j++)
        {
          w[i][j] = *(inptrs[i][j]++);
        }
      }

      // Compute the matrix W w
      for (int j = 0; j < 5; j++)
      {
        Ww[0][j] = w[0][j]/4.0f;
        Ww[1][j] = -( w[0][j] + w[1][j] + w[2][j] + w[3][j] + w[4][j])/6.0f;
        Ww[2][j] = +(-w[0][j] + w[1][j] - w[2][j] + w[3][j] - w[4][j])/6.0f;
        Ww[3][j] = (w[0][j]/8.0f + w[1][j]/4.0f + w[2][j]/2.0f + w[3][j] + 2*w[4][j])/3.0f;
        Ww[4][j] = (w[0][j]/8.0f - w[1][j]/4.0f + w[2][j]/2.0f - w[3][j] + 2*w[4][j])/3.0f;
        Ww[5][j] = w[4][j];
      }

      // Compute V = W w WT
      for (int i = 0; i < 6; i++)
      {
        V[i][0] = Ww[i][0]/4.0f;
        V[i][1] = -( Ww[i][0] + Ww[i][1] + Ww[i][2] + Ww[i][3] + Ww[i][4])/6.0f;
        V[i][2] = +(-Ww[i][0] + Ww[i][1] - Ww[i][2] + Ww[i][3] - Ww[i][4])/6.0f;
        V[i][3] = (Ww[i][0]/8.0f + Ww[i][1]/4.0f + Ww[i][2]/2.0f + Ww[i][3] + 2*Ww[i][4])/3.0f;
        V[i][4] = (Ww[i][0]/8.0f - Ww[i][1]/4.0f + Ww[i][2]/2.0f - Ww[i][3] + 2*Ww[i][4])/3.0f;
        V[i][5] = Ww[i][4];
      }

      // Store the transformed weights
      for (int i = 0, m = 0; i < 6; i++)
      {
        for (int j = 0; j < 6; j++, m++)
        {
          *(outptr + m*matrix_stride) = V[i][j];
        }
      }
      outptr++;
    }
  }
}

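// Explicitly instantiate the fp32/fp32 weight transform defined above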
template class WeightTransform<5, 5, 6, 6, float, float, WinogradRoots::Integers>;

} // namespace winograd