/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#pragma once

#ifdef __aarch64__

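/*
 * Merge a set of packed 12x8 (width x height) int32 result blocks into the
 * destination matrix 'out' (leading dimension 'ldout'), covering rows
 * y0..ymax-1 and columns x0..xmax-1. 'alpha' is unused in this integer
 * specialisation; beta == 0 overwrites the destination, otherwise the packed
 * results are accumulated into it.
 */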
template<>
inline void MergeResults<12, 8, false>(int32_t *out, const int32_t *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const int32_t alpha, const int32_t beta) {
    UNUSED(alpha);
    const int32_t *inptr = in;
    prefetch_6x(inptr);
    prefetch_6x(inptr + 96);
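    // Each packed block holds 8 rows x 12 columns = 96 int32 values (384 bytes);
    // the two prefetches above pull in the first two blocks before the merge loop starts.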

    for (int y=y0; y<ymax; y+=8) {
        int32_t *outptr0 = out + (y * ldout) + x0;
        int32_t *outptr1 = outptr0 + ldout;
        int32_t *outptr2 = outptr1 + ldout;
        int32_t *outptr3 = outptr2 + ldout;
        int32_t *outptr4 = outptr3 + ldout;
        int32_t *outptr5 = outptr4 + ldout;
        int32_t *outptr6 = outptr5 + ldout;
        int32_t *outptr7 = outptr6 + ldout;

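        // Prefetch the start of each of the eight destination rows before merging into them.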
        prefetch_2x(outptr0);
        prefetch_2x(outptr1);
        prefetch_2x(outptr2);
        prefetch_2x(outptr3);
        prefetch_2x(outptr4);
        prefetch_2x(outptr5);
        prefetch_2x(outptr6);
        prefetch_2x(outptr7);

        for (int i=x0; i<xmax; i+=12) {
            int32_t dummyres[12];

            /* Make sure we throw away results if Y isn't a multiple of 8.
             * We do this by pointing the result pointer at a dummy buffer
             * we later discard. */
            if ((y+7) >= ymax) {
                switch ((y + 7) - ymax) {
                    case 6:
                        outptr1 = dummyres;
                        // fall through
                    case 5:
                        outptr2 = dummyres;
                        // fall through
                    case 4:
                        outptr3 = dummyres;
                        // fall through
                    case 3:
                        outptr4 = dummyres;
                        // fall through
                    case 2:
                        outptr5 = dummyres;
                        // fall through
                    case 1:
                        outptr6 = dummyres;
                        // fall through
                    case 0:
                        outptr7 = dummyres;
                        break;

                    default:
                        UNREACHABLE("Impossible.");
                }
            }
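            // Example: if only three rows remain (ymax - y == 3), then (y + 7) - ymax == 4,
            // so case 4 is taken and outptr3 through outptr7 all fall through to dummyres.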

            /* For ragged X, manually copy over the valid results. */
            if ((i+11) >= xmax) {
                for (int xi=0; xi<12; xi++) {
                    if ((i+xi) < xmax) {
                        *outptr0 = (inptr[xi]) + (*outptr0 * beta);
                        outptr0++;
                        *outptr1 = (inptr[xi + 12]) + (*outptr1 * beta);
                        outptr1++;
                        *outptr2 = (inptr[xi + 24]) + (*outptr2 * beta);
                        outptr2++;
                        *outptr3 = (inptr[xi + 36]) + (*outptr3 * beta);
                        outptr3++;
                        *outptr4 = (inptr[xi + 48]) + (*outptr4 * beta);
                        outptr4++;
                        *outptr5 = (inptr[xi + 60]) + (*outptr5 * beta);
                        outptr5++;
                        *outptr6 = (inptr[xi + 72]) + (*outptr6 * beta);
                        outptr6++;
                        *outptr7 = (inptr[xi + 84]) + (*outptr7 * beta);
                        outptr7++;
                    }
                }
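                // Even when the block is only partially written, the packed input still
                // occupies a full 96-value tile, so always advance by a whole block.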
                inptr += 96;
            } else {
                if (beta == 0u) {
                    /* beta == 0: copy the entire 12x8 block straight over the output,
                     * three q registers (48 bytes) per row. */
                    __asm __volatile (
                        // Row 0
                        ASM_PREFETCH("[%x[outptr1], #192]")
                        "ldr q0, [%x[inptr]]\n"
                        "ldr q1, [%x[inptr], #0x10]\n"
                        "ldr q2, [%x[inptr], #0x20]\n"

                        // Row 1
                        ASM_PREFETCH("[%x[outptr2], #192]")
                        "ldr q3, [%x[inptr], #0x30]\n"
                        "str q0, [%x[outptr0]], #0x10\n"
                        "ldr q4, [%x[inptr], #0x40]\n"
                        "str q1, [%x[outptr0]], #0x10\n"
                        "ldr q5, [%x[inptr], #0x50]\n"
                        "str q2, [%x[outptr0]], #0x10\n"

                        // Row 2
                        ASM_PREFETCH("[%x[outptr3], #192]")
                        "ldr q0, [%x[inptr], #0x60]\n"
                        "str q3, [%x[outptr1]], #0x10\n"
                        "ldr q1, [%x[inptr], #0x70]\n"
                        "str q4, [%x[outptr1]], #0x10\n"
                        "ldr q2, [%x[inptr], #0x80]\n"
                        "str q5, [%x[outptr1]], #0x10\n"

                        // Row 3
                        ASM_PREFETCH("[%x[outptr4], #192]")
                        "ldr q3, [%x[inptr], #0x90]\n"
                        "str q0, [%x[outptr2]], #0x10\n"
                        "ldr q4, [%x[inptr], #0xa0]\n"
                        "str q1, [%x[outptr2]], #0x10\n"
                        "ldr q5, [%x[inptr], #0xb0]\n"
                        "str q2, [%x[outptr2]], #0x10\n"

                        // Row 4
                        ASM_PREFETCH("[%x[outptr5], #192]")
                        "ldr q0, [%x[inptr], #0xc0]\n"
                        "str q3, [%x[outptr3]], #0x10\n"
                        "ldr q1, [%x[inptr], #0xd0]\n"
                        "str q4, [%x[outptr3]], #0x10\n"
                        "ldr q2, [%x[inptr], #0xe0]\n"
                        "str q5, [%x[outptr3]], #0x10\n"

                        // Row 5
                        ASM_PREFETCH("[%x[outptr6], #192]")
                        "ldr q3, [%x[inptr], #0xf0]\n"
                        "str q0, [%x[outptr4]], #0x10\n"
                        "ldr q4, [%x[inptr], #0x100]\n"
                        "str q1, [%x[outptr4]], #0x10\n"
                        "ldr q5, [%x[inptr], #0x110]\n"
                        "str q2, [%x[outptr4]], #0x10\n"

                        // Row 6
                        ASM_PREFETCH("[%x[outptr7], #192]")
                        "ldr q0, [%x[inptr], #0x120]\n"
                        "str q3, [%x[outptr5]], #0x10\n"
                        "ldr q1, [%x[inptr], #0x130]\n"
                        "str q4, [%x[outptr5]], #0x10\n"
                        "ldr q2, [%x[inptr], #0x140]\n"
                        "str q5, [%x[outptr5]], #0x10\n"

                        // Row 7
                        "ldr q3, [%x[inptr], #0x150]\n"
                        "str q0, [%x[outptr6]], #0x10\n"
                        "ldr q4, [%x[inptr], #0x160]\n"
                        "str q1, [%x[outptr6]], #0x10\n"
                        "ldr q5, [%x[inptr], #0x170]\n"
                        "str q2, [%x[outptr6]], #0x10\n"
                        "str q3, [%x[outptr7]], #0x10\n"
                        "str q4, [%x[outptr7]], #0x10\n"
                        "str q5, [%x[outptr7]], #0x10\n"

                        "add %x[inptr], %x[inptr], #0x180\n"
                    : [outptr0] "+r" (outptr0),
                      [outptr1] "+r" (outptr1),
                      [outptr2] "+r" (outptr2),
                      [outptr3] "+r" (outptr3),
                      [outptr4] "+r" (outptr4),
                      [outptr5] "+r" (outptr5),
                      [outptr6] "+r" (outptr6),
                      [outptr7] "+r" (outptr7),
                      [inptr] "+r" (inptr)
                    :
                    : "v0", "v1", "v2", "v3", "v4", "v5", "v6"
                    );
                } else {
                    /* beta != 0: add the entire 12x8 block of packed results to the
                     * existing contents of the output rows. */
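                    // Loads of the next row's output and packed input are interleaved with the
                    // adds and stores of the current row to hide memory latency: q6-q8 always
                    // hold the freshly loaded input, while q0-q2 and q3-q5 alternate per row.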
                    __asm __volatile (
                        // Row 0
                        ASM_PREFETCH("[%x[outptr1], #192]")
                        "ldr q3, [%x[outptr0]]\n"
                        "ldr q4, [%x[outptr0], #0x10]\n"
                        "ldr q5, [%x[outptr0], #0x20]\n"
                        "ldr q6, [%x[inptr]]\n"
                        "ldr q7, [%x[inptr], #0x10]\n"
                        "ldr q8, [%x[inptr], #0x20]\n"
                        "add v3.4s, v3.4s, v6.4s\n"
                        "ldr q0, [%x[outptr1]]\n"
                        "add v4.4s, v4.4s, v7.4s\n"
                        "ldr q1, [%x[outptr1], #0x10]\n"
                        "add v5.4s, v5.4s, v8.4s\n"
                        "ldr q2, [%x[outptr1], #0x20]\n"

                        // Row 1
                        ASM_PREFETCH("[%x[outptr2], #192]")
                        "ldr q6, [%x[inptr], #0x30]\n"
                        "str q3, [%x[outptr0]], #0x10\n"
                        "ldr q7, [%x[inptr], #0x40]\n"
                        "str q4, [%x[outptr0]], #0x10\n"
                        "ldr q8, [%x[inptr], #0x50]\n"
                        "str q5, [%x[outptr0]], #0x10\n"
                        "add v0.4s, v0.4s, v6.4s\n"
                        "ldr q3, [%x[outptr2]]\n"
                        "add v1.4s, v1.4s, v7.4s\n"
                        "ldr q4, [%x[outptr2], #0x10]\n"
                        "add v2.4s, v2.4s, v8.4s\n"
                        "ldr q5, [%x[outptr2], #0x20]\n"

                        // Row 2
                        ASM_PREFETCH("[%x[outptr3], #192]")
                        "ldr q6, [%x[inptr], #0x60]\n"
                        "str q0, [%x[outptr1]], #0x10\n"
                        "ldr q7, [%x[inptr], #0x70]\n"
                        "str q1, [%x[outptr1]], #0x10\n"
                        "ldr q8, [%x[inptr], #0x80]\n"
                        "str q2, [%x[outptr1]], #0x10\n"
                        "add v3.4s, v3.4s, v6.4s\n"
                        "ldr q0, [%x[outptr3]]\n"
                        "add v4.4s, v4.4s, v7.4s\n"
                        "ldr q1, [%x[outptr3], #0x10]\n"
                        "add v5.4s, v5.4s, v8.4s\n"
                        "ldr q2, [%x[outptr3], #0x20]\n"

                        // Row 3
                        ASM_PREFETCH("[%x[outptr4], #192]")
                        "ldr q6, [%x[inptr], #0x90]\n"
                        "str q3, [%x[outptr2]], #0x10\n"
                        "ldr q7, [%x[inptr], #0xa0]\n"
                        "str q4, [%x[outptr2]], #0x10\n"
                        "ldr q8, [%x[inptr], #0xb0]\n"
                        "str q5, [%x[outptr2]], #0x10\n"
                        "add v0.4s, v0.4s, v6.4s\n"
                        "ldr q3, [%x[outptr4]]\n"
                        "add v1.4s, v1.4s, v7.4s\n"
                        "ldr q4, [%x[outptr4], #0x10]\n"
                        "add v2.4s, v2.4s, v8.4s\n"
                        "ldr q5, [%x[outptr4], #0x20]\n"

                        // Row 4
                        ASM_PREFETCH("[%x[outptr5], #192]")
                        "ldr q6, [%x[inptr], #0xc0]\n"
                        "str q0, [%x[outptr3]], #0x10\n"
                        "ldr q7, [%x[inptr], #0xd0]\n"
                        "str q1, [%x[outptr3]], #0x10\n"
                        "ldr q8, [%x[inptr], #0xe0]\n"
                        "str q2, [%x[outptr3]], #0x10\n"
                        "add v3.4s, v3.4s, v6.4s\n"
                        "ldr q0, [%x[outptr5]]\n"
                        "add v4.4s, v4.4s, v7.4s\n"
                        "ldr q1, [%x[outptr5], #0x10]\n"
                        "add v5.4s, v5.4s, v8.4s\n"
                        "ldr q2, [%x[outptr5], #0x20]\n"

                        // Row 5
                        ASM_PREFETCH("[%x[outptr6], #192]")
                        "ldr q6, [%x[inptr], #0xf0]\n"
                        "str q3, [%x[outptr4]], #0x10\n"
                        "ldr q7, [%x[inptr], #0x100]\n"
                        "str q4, [%x[outptr4]], #0x10\n"
                        "ldr q8, [%x[inptr], #0x110]\n"
                        "str q5, [%x[outptr4]], #0x10\n"
                        "add v0.4s, v0.4s, v6.4s\n"
                        "ldr q3, [%x[outptr6]]\n"
                        "add v1.4s, v1.4s, v7.4s\n"
                        "ldr q4, [%x[outptr6], #0x10]\n"
                        "add v2.4s, v2.4s, v8.4s\n"
                        "ldr q5, [%x[outptr6], #0x20]\n"

                        // Row 6
                        ASM_PREFETCH("[%x[outptr7], #192]")
                        "ldr q6, [%x[inptr], #0x120]\n"
                        "str q0, [%x[outptr5]], #0x10\n"
                        "ldr q7, [%x[inptr], #0x130]\n"
                        "str q1, [%x[outptr5]], #0x10\n"
                        "ldr q8, [%x[inptr], #0x140]\n"
                        "str q2, [%x[outptr5]], #0x10\n"
                        "add v3.4s, v3.4s, v6.4s\n"
                        "ldr q0, [%x[outptr7]]\n"
                        "add v4.4s, v4.4s, v7.4s\n"
                        "ldr q1, [%x[outptr7], #0x10]\n"
                        "add v5.4s, v5.4s, v8.4s\n"
                        "ldr q2, [%x[outptr7], #0x20]\n"

                        // Row 7
                        "ldr q6, [%x[inptr], #0x150]\n"
                        "str q3, [%x[outptr6]], #0x10\n"
                        "ldr q7, [%x[inptr], #0x160]\n"
                        "str q4, [%x[outptr6]], #0x10\n"
                        "ldr q8, [%x[inptr], #0x170]\n"
                        "str q5, [%x[outptr6]], #0x10\n"
                        "add v0.4s, v0.4s, v6.4s\n"
                        "add v1.4s, v1.4s, v7.4s\n"
                        "add v2.4s, v2.4s, v8.4s\n"
                        "str q0, [%x[outptr7]], #0x10\n"
                        "str q1, [%x[outptr7]], #0x10\n"
                        "str q2, [%x[outptr7]], #0x10\n"

                        "add %x[inptr], %x[inptr], #0x180\n"
                    : [outptr0] "+r" (outptr0),
                      [outptr1] "+r" (outptr1),
                      [outptr2] "+r" (outptr2),
                      [outptr3] "+r" (outptr3),
                      [outptr4] "+r" (outptr4),
                      [outptr5] "+r" (outptr5),
                      [outptr6] "+r" (outptr6),
                      [outptr7] "+r" (outptr7),
                      [inptr] "+r" (inptr)
                    :
                    : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8"
                    );

                }
            }
        }
    }
}

template<>
inline void MergeResults<12, 8>(uint32_t *out, const uint32_t *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const uint32_t alpha, const uint32_t beta) {
    // The code above uses only additions and integer multiplies, which give bit-identical
    // results for signed and unsigned 32-bit values, so discard the "unsignedness" and
    // forward safely to the int32_t implementation.
    MergeResults<12, 8>(reinterpret_cast<int32_t*>(out), reinterpret_cast<const int32_t*>(in), ldout, y0, ymax, x0, xmax, static_cast<const int32_t>(alpha), static_cast<const int32_t>(beta));
}
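
/* Illustrative usage sketch (buffer names and sizes are hypothetical, not part of this file):
 *
 *     int32_t C[M * N];          // row-major destination matrix, leading dimension N
 *     const int32_t *packed;     // kernel output: consecutive 12 (wide) x 8 (tall) blocks
 *     MergeResults<12, 8, false>(C, packed, N, 0, M, 0, N, 1, 0);
 *
 * y0/ymax select the destination rows and x0/xmax the columns to merge; alpha is ignored by
 * this specialisation and beta == 0 makes the merge overwrite C rather than accumulate into it.
 */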

#endif // __aarch64__