/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/IAccessWindow.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "support/ToolchainSupport.h"

#include <arm_neon.h>
#include <cstddef>
#include <cstdint>

// Enable only if compiled for AArch64-V8.2-A targets
#ifdef ARM_COMPUTE_AARCH64_V8_2

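// Helper macros that expand to AArch64 PRFM prefetch instructions inside inline assembly:
// PLDL1KEEP / PLDL2KEEP prefetch for load into L1 / L2, PSTL1KEEP / PSTL2KEEP prefetch for store.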
#define ASM_PREFETCH(address) "PRFM PLDL1KEEP, " address "\n"
#define ASM_PREFETCHL2(address) "PRFM PLDL2KEEP, " address "\n"
#define ASM_PREFETCHW(address) "PRFM PSTL1KEEP, " address "\n"
#define ASM_PREFETCHWL2(address) "PRFM PSTL2KEEP, " address "\n"

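// Read one 128-bit chunk of each of the eight output rows, accumulate the corresponding
// result vectors into it, and store it back with post-increment so each row pointer
// advances by four uint32_t values. This variant also prefetches further along each row.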
static inline void stincpld(uint32x4_t v0, uint32x4_t v1, uint32x4_t v2, uint32x4_t v3,
                            uint32x4_t v4, uint32x4_t v5, uint32x4_t v6, uint32x4_t v7,
                            uint32_t *&ptr0, uint32_t *&ptr1, uint32_t *&ptr2, uint32_t *&ptr3,
                            uint32_t *&ptr4, uint32_t *&ptr5, uint32_t *&ptr6, uint32_t *&ptr7)
{
    __asm __volatile(
        "LDR q0, [%[ptr0]]\n"
        "LDR q1, [%[ptr1]]\n"
        "LDR q2, [%[ptr2]]\n"
        "LDR q3, [%[ptr3]]\n"
        "LDR q4, [%[ptr4]]\n"
        "LDR q5, [%[ptr5]]\n"
        "LDR q6, [%[ptr6]]\n"
        "LDR q7, [%[ptr7]]\n"
        "ADD v0.4s, v0.4s, %[v0].4s\n" ASM_PREFETCH("[%[ptr0], #80]") "ADD v1.4s, v1.4s, %[v1].4s\n" ASM_PREFETCH("[%[ptr1], #80]") "ADD v2.4s, v2.4s, %[v2].4s\n" ASM_PREFETCH("[%[ptr2], #80]")
        "ADD v3.4s, v3.4s, %[v3].4s\n" ASM_PREFETCH("[%[ptr3], #80]") "ADD v4.4s, v4.4s, %[v4].4s\n" ASM_PREFETCH("[%[ptr4], #80]") "ADD v5.4s, v5.4s, %[v5].4s\n" ASM_PREFETCH("[%[ptr5], #80]")
        "ADD v6.4s, v6.4s, %[v6].4s\n" ASM_PREFETCH("[%[ptr6], #80]") "ADD v7.4s, v7.4s, %[v7].4s\n" ASM_PREFETCH("[%[ptr7], #80]")
        "STR q0, [%[ptr0]], #16\n"
        "STR q1, [%[ptr1]], #16\n"
        "STR q2, [%[ptr2]], #16\n"
        "STR q3, [%[ptr3]], #16\n"
        "STR q4, [%[ptr4]], #16\n"
        "STR q5, [%[ptr5]], #16\n"
        "STR q6, [%[ptr6]], #16\n"
        "STR q7, [%[ptr7]], #16\n"
        : [ptr0] "+r"(ptr0), [ptr1] "+r"(ptr1), [ptr2] "+r"(ptr2), [ptr3] "+r"(ptr3),
        [ptr4] "+r"(ptr4), [ptr5] "+r"(ptr5), [ptr6] "+r"(ptr6), [ptr7] "+r"(ptr7)
        : [v0] "w"(v0), [v1] "w"(v1), [v2] "w"(v2), [v3] "w"(v3),
        [v4] "w"(v4), [v5] "w"(v5), [v6] "w"(v6), [v7] "w"(v7)
        : "x20", "x21", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "memory");
}

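// Same read-accumulate-write-with-increment step as stincpld, but without the prefetches.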
static inline void stinc(uint32x4_t v0, uint32x4_t v1, uint32x4_t v2, uint32x4_t v3,
                         uint32x4_t v4, uint32x4_t v5, uint32x4_t v6, uint32x4_t v7,
                         uint32_t *&ptr0, uint32_t *&ptr1, uint32_t *&ptr2, uint32_t *&ptr3,
                         uint32_t *&ptr4, uint32_t *&ptr5, uint32_t *&ptr6, uint32_t *&ptr7)
{
    __asm __volatile(
        "LDR q0, [%[ptr0]]\n"
        "LDR q1, [%[ptr1]]\n"
        "LDR q2, [%[ptr2]]\n"
        "LDR q3, [%[ptr3]]\n"
        "LDR q4, [%[ptr4]]\n"
        "LDR q5, [%[ptr5]]\n"
        "LDR q6, [%[ptr6]]\n"
        "LDR q7, [%[ptr7]]\n"
        "ADD v0.4s, v0.4s, %[v0].4s\n"
        "ADD v1.4s, v1.4s, %[v1].4s\n"
        "ADD v2.4s, v2.4s, %[v2].4s\n"
        "ADD v3.4s, v3.4s, %[v3].4s\n"
        "ADD v4.4s, v4.4s, %[v4].4s\n"
        "ADD v5.4s, v5.4s, %[v5].4s\n"
        "ADD v6.4s, v6.4s, %[v6].4s\n"
        "ADD v7.4s, v7.4s, %[v7].4s\n"
        "STR q0, [%[ptr0]], #16\n"
        "STR q1, [%[ptr1]], #16\n"
        "STR q2, [%[ptr2]], #16\n"
        "STR q3, [%[ptr3]], #16\n"
        "STR q4, [%[ptr4]], #16\n"
        "STR q5, [%[ptr5]], #16\n"
        "STR q6, [%[ptr6]], #16\n"
        "STR q7, [%[ptr7]], #16\n"
        : [ptr0] "+r"(ptr0), [ptr1] "+r"(ptr1), [ptr2] "+r"(ptr2), [ptr3] "+r"(ptr3),
        [ptr4] "+r"(ptr4), [ptr5] "+r"(ptr5), [ptr6] "+r"(ptr6), [ptr7] "+r"(ptr7)
        : [v0] "w"(v0), [v1] "w"(v1), [v2] "w"(v2), [v3] "w"(v3),
        [v4] "w"(v4), [v5] "w"(v5), [v6] "w"(v6), [v7] "w"(v7)
        : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "memory");
}

namespace arm_compute
{
void NEGEMMLowpAArch64V8P4Kernel::internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::U8);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);

    _input0 = input0;
    _input1 = input1;
    _output = output;

    // Configure kernel window
    Window win = calculate_max_window(*output->info());

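    // The kernel writes the output in 12 (x) by 8 (y) tiles, so request a matching access
    // rectangle on the output and pad the reshaped inputs up to multiples of 8 and 12 elements.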
    AccessWindowRectangle output_access(output->info(), 0, 0, 12, 8);

    const int input0_access_end = ceil_to_multiple(input0->info()->tensor_shape().x(), 8);
    const int input1_access_end = ceil_to_multiple(input1->info()->tensor_shape().x(), 12);

    update_window_and_padding(win,
                              AccessWindowStatic(input0->info(), 0, 0, input0_access_end, input0->info()->tensor_shape().y()),
                              AccessWindowStatic(input1->info(), 0, 0, input1_access_end, input1->info()->tensor_shape().y()),
                              output_access);

    INEKernel::configure(win);
}

bool NEGEMMLowpAArch64V8P4Kernel::is_parallelisable() const
{
    return false;
}

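// UDOT (unsigned 8-bit dot product) is an ARMv8.2-A extension that older assemblers may not
// recognise, so this assembler macro parses the "udot vd.4s, vn.16b, vm.4b[idx]" operands and
// emits the instruction encoding directly as a ".int" word (base opcode 0x6f80e000).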
#define _UDOT_MACRO \
    ".altmacro\n" \
    ".macro udot opd:req, opn:req, opm:req\n" \
    "local vd, vn, vm, h, l\n" \
    ".irp reg,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31\n" \
    ".ifeqs \"\\opd\",\"v\\reg\\.4s\"\n" \
    ".set vd,\\reg\n" \
    ".endif\n" \
    ".ifeqs \"\\opn\",\"v\\reg\\.16b\"\n" \
    ".set vn,\\reg\n" \
    ".endif\n" \
    ".irp idx,0,1,2,3\n" \
    ".ifeqs \"\\opm\",\"v\\reg\\.4b[\\idx\\]\"\n" \
    ".set vm,\\reg\n" \
    ".set h,\\idx / 2\n" \
    ".set l,\\idx %% 2\n" \
    ".endif\n" \
    ".endr\n" \
    ".endr\n" \
    ".ifndef vd\n" \
    ".error \"Bad operand \\opd\"\n" \
    ".exitm\n" \
    ".endif\n" \
    ".ifndef vn\n" \
    ".error \"Bad operand \\opn\"\n" \
    ".exitm\n" \
    ".endif\n" \
    ".ifndef vm\n" \
    ".error \"Bad operand \\opm\"\n" \
    ".exitm\n" \
    ".endif\n" \
    ".ifndef h\n" \
    ".error \"Bad operand \\opm\"\n" \
    ".exitm\n" \
    ".endif\n" \
    ".ifndef l\n" \
    ".error \"Bad operand \\opm\"\n" \
    ".exitm\n" \
    ".endif\n" \
    ".int 0x6f80e000 | vd | (vn << 5) | (vm << 16) | (l << 21) | (h << 11)\n" \
    ".endm\n"

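// Issue prefetches for the first cache lines of the current A and B panels, and use empty
// asm statements to bind the accumulator and operand register variables to the compiler
// so their register assignments are established before the main assembly loop.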
#define _PREFETCH_ \
    __asm __volatile( \
        "" ASM_PREFETCH("[%[a_ptr], #64]") \
        ASM_PREFETCH("[%[a_ptr], #128]") \
        ASM_PREFETCH("[%[a_ptr], #192]") \
        : \
        : \
        [a_ptr] "r"(a_ptr), [b_ptr] "r"(b_ptr) \
        : "x20", "x21", "memory"); \
    __asm __volatile( \
        "" ASM_PREFETCH("[%[b_ptr]]") \
        ASM_PREFETCH("[%[b_ptr], #64]") \
        ASM_PREFETCH("[%[b_ptr], #128]") \
        ASM_PREFETCH("[%[b_ptr], #192]") \
        : \
        : \
        [b_ptr] "r"(b_ptr) \
        : "x20", "x21"); \
    __asm __volatile( \
        "" \
        : [r00] "+w"(r00), [r01] "+w"(r01), \
        [r10] "+w"(r10), [r11] "+w"(r11), \
        [r20] "+w"(r20), [r21] "+w"(r21), \
        [r30] "+w"(r30), [r31] "+w"(r31), \
        [a0] "+w"(a0), [a1] "+w"(a1), \
        [b0] "+w"(b0), [b1] "+w"(b1), [b2] "=w"(b2), \
        [a_ptr] "+r"(a_ptr), [b_ptr] "+r"(b_ptr) \
        : \
        :); \
    __asm __volatile( \
        "" \
        : [r02] "+w"(r02), \
        [r12] "+w"(r12), \
        [r22] "+w"(r22), \
        [r32] "+w"(r32), \
        [r40] "+w"(r40), \
        [r50] "+w"(r50), \
        [r60] "+w"(r60), \
        [r70] "+w"(r70), \
        [a0a] "=w"(a0a), [a1a] "=w"(a1a), \
        [b0] "+w"(b0), [b2] "+w"(b2), [b5] "=&w"(b5) \
        : \
        :); \
    __asm __volatile( \
        "" \
        : \
        [r41] "+w"(r41), [r42] "+w"(r42), \
        [r51] "+w"(r51), [r52] "+w"(r52), \
        [r61] "+w"(r61), [r62] "+w"(r62), \
        [r71] "+w"(r71), [r72] "+w"(r72), \
        [a1] "+w"(a1), \
        [b0] "+w"(b0), [b1] "+w"(b1), [b2] "+w"(b2), \
        [b_ptr] "+r"(b_ptr), [k] "+r"(k) \
        : \
        :);

void NEGEMMLowpAArch64V8P4Kernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);

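    // Cache blocking parameters: the operands are walked in panels of up to k_block depth
    // values and x_block output columns at a time. K is the x dimension of the reshaped
    // input0 divided by 8, presumably the row-interleave factor of the reshaped A operand.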
    const int x_block  = 348;
    const int k_block  = 1664;
    const int nthreads = 1;
    const int M        = _output->info()->tensor_shape().y();
    const int N        = _output->info()->tensor_shape().x();
    const int K        = _input0->info()->tensor_shape().x() >> 3;

    int yblocksperthread = ((M / nthreads) + 7) / 8;

    if(yblocksperthread < 1)
    {
        yblocksperthread = 1;
    }

    const int lda  = _input0->info()->strides_in_bytes().y();
    const int ldb  = _input1->info()->strides_in_bytes().y();
    const int ldc  = _output->info()->strides_in_bytes().y();
    const int ldc2 = _output->info()->strides_in_bytes().x();
    const int ldc3 = ldc / sizeof(uint32_t);

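    // is_parallelisable() returns false, so this kernel always runs on a single thread:
    // threadid is fixed to 0 and [y0, ymax) covers every row of the output.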
    const int threadid = 0;
    int y0             = threadid * yblocksperthread * 8;
    int ymax           = (threadid + 1) * yblocksperthread * 8;
    if(y0 >= M)
    {
        return;
    }
    if(ymax > M)
    {
        ymax = M;
    }
    for(int k0 = 0; k0 < K; k0 += k_block)
    {
        int kmax = k0 + k_block;
        if(kmax > K)
        {
            kmax = K;
        }

        for(int x0 = 0; x0 < N; x0 += x_block)
        {
            int xmax = x0 + x_block;
            if(xmax > N)
            {
                xmax = N;
            }

            for(int y = y0; y < ymax; y += 8)
            {
                auto      c_ptr0 = reinterpret_cast<uint32_t *>(_output->buffer() + (y * ldc) + x0 * ldc2);
                uint32_t *c_ptr1 = c_ptr0 + ldc3;
                uint32_t *c_ptr2 = c_ptr1 + ldc3;
                uint32_t *c_ptr3 = c_ptr2 + ldc3;
                uint32_t *c_ptr4 = c_ptr3 + ldc3;
                uint32_t *c_ptr5 = c_ptr4 + ldc3;
                uint32_t *c_ptr6 = c_ptr5 + ldc3;
                uint32_t *c_ptr7 = c_ptr6 + ldc3;

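                // Prefetch the start of each of the eight output rows for this tile.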
                __asm __volatile(
                    "" ASM_PREFETCH("[%[c_ptr0]]")
                    ASM_PREFETCH("[%[c_ptr1]]")
                    ASM_PREFETCH("[%[c_ptr2]]")
                    ASM_PREFETCH("[%[c_ptr3]]")
                    ASM_PREFETCH("[%[c_ptr4]]")
                    ASM_PREFETCH("[%[c_ptr5]]")
                    ASM_PREFETCH("[%[c_ptr6]]")
                    ASM_PREFETCH("[%[c_ptr7]]")
                    :
                    : [c_ptr0] "r"(c_ptr0), [c_ptr1] "r"(c_ptr1), [c_ptr2] "r"(c_ptr2), [c_ptr3] "r"(c_ptr3),
                    [c_ptr4] "r"(c_ptr4), [c_ptr5] "r"(c_ptr5), [c_ptr6] "r"(c_ptr6), [c_ptr7] "r"(c_ptr7)
                    : "x20", "x21");

                for(int x = x0; x < xmax; x += 12)
                {
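                    // 24 accumulator registers (v8-v31) hold the 8x12 uint32 output tile:
                    // three 4-lane vectors per output row (rN0, rN1, rN2) for eight rows.
                    // v0-v7 carry the A (a0, a1, a0a, a1a) and B (b0, b1, b2, b5) operand data.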
                    register uint32x4_t r00 asm("v8");
                    register uint32x4_t r10 asm("v9");
                    register uint32x4_t r20 asm("v10");
                    register uint32x4_t r30 asm("v11");
                    register uint32x4_t r40 asm("v12");
                    register uint32x4_t r50 asm("v13");
                    register uint32x4_t r60 asm("v14");
                    register uint32x4_t r70 asm("v15");
                    register uint32x4_t r01 asm("v16");
                    register uint32x4_t r11 asm("v17");
                    register uint32x4_t r21 asm("v18");
                    register uint32x4_t r31 asm("v19");
                    register uint32x4_t r41 asm("v20");
                    register uint32x4_t r51 asm("v21");
                    register uint32x4_t r61 asm("v22");
                    register uint32x4_t r71 asm("v23");
                    register uint32x4_t r02 asm("v24");
                    register uint32x4_t r12 asm("v25");
                    register uint32x4_t r22 asm("v26");
                    register uint32x4_t r32 asm("v27");
                    register uint32x4_t r42 asm("v28");
                    register uint32x4_t r52 asm("v29");
                    register uint32x4_t r62 asm("v30");
                    register uint32x4_t r72 asm("v31");

                    register uint8x16_t a0 asm("v0");
                    register uint8x16_t a1 asm("v1");
                    register uint8x16_t b0 asm("v2");
                    register uint8x16_t b1 asm("v3");
                    register uint8x16_t b2 asm("v4");
                    register uint8x16_t a0a asm("v5");
                    register uint8x16_t a1a asm("v6");
                    register uint8x16_t b5 asm("v7");
                    const uint8_t *a_ptr = _input0->buffer() + ((y / 8) * lda) + (k0 * 8);
                    const uint8_t *b_ptr = _input1->buffer() + ((x / 12) * ldb) + (k0 * 12);

                    r00 = r01 = r02 = r10 = r11 = r12 = r20 = r21 = r22 = r30 = r31 = r32 = vdupq_n_u32(0);
                    r40 = r41 = r42 = r50 = r51 = r52 = r60 = r61 = r62 = r70 = r71 = r72 = vdupq_n_u32(0);

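                    // Each main-loop iteration below consumes 8 values of K (two unrolled groups
                    // of 4), so run ((kmax - k0) / 8) - 1 iterations and peel the final group as
                    // the tail handled after the branch.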
                    int k = ((kmax - k0) / 8) - 1;

                    a0 = vld1q_u8(a_ptr);
                    b0 = vld1q_u8(b_ptr);
                    a1 = vld1q_u8(a_ptr + 16);
                    b1 = vld1q_u8(b_ptr + 16);

                    _PREFETCH_

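                    // Main inner loop: 48 UDOT instructions per iteration (24 accumulators x 2
                    // unrolled K-groups), with operand loads and prefetches interleaved to hide
                    // memory latency. The code after the "bne 1b" branch is the peeled final
                    // iteration, which also prefetches the output rows ahead of the write-back.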
                    __asm __volatile(
                        _UDOT_MACRO
                        "1:\n"
                        "udot v8.4s , %[b0].16b, %[a0].4b[0]\n"
                        "udot v9.4s , %[b0].16b, %[a0].4b[1]\n"
                        "ldr %q[b2], [%[b_ptr], #32]\n"
                        "udot v10.4s, %[b0].16b, %[a0].4b[2]\n"
                        "udot v11.4s, %[b0].16b, %[a0].4b[3]\n"
                        "ldr %q[a0a], [%[a_ptr], #32]\n"
                        "udot v12.4s, %[b0].16b, %[a1].4b[0]\n"
                        "udot v13.4s, %[b0].16b, %[a1].4b[1]\n"
                        "ldr %q[a1a], [%[a_ptr], #48]\n"
                        "udot v14.4s, %[b0].16b, %[a1].4b[2]\n"
                        "udot v15.4s, %[b0].16b, %[a1].4b[3]\n"
                        "ldr %q[b0], [%[b_ptr], #48]\n"

                        "udot v16.4s, %[b1].16b, %[a0].4b[0]\n"
                        "udot v17.4s, %[b1].16b, %[a0].4b[1]\n" ASM_PREFETCH("[%[a_ptr], #256]")
                        "udot v18.4s, %[b1].16b, %[a0].4b[2]\n"
                        "udot v19.4s, %[b1].16b, %[a0].4b[3]\n"
                        "udot v20.4s, %[b1].16b, %[a1].4b[0]\n"
                        "udot v21.4s, %[b1].16b, %[a1].4b[1]\n"
                        "udot v22.4s, %[b1].16b, %[a1].4b[2]\n"
                        "udot v23.4s, %[b1].16b, %[a1].4b[3]\n"
                        "ldr %q[b1], [%[b_ptr], #64]\n"

                        "udot v24.4s, %[b2].16b, %[a0].4b[0]\n"
                        "udot v25.4s, %[b2].16b, %[a0].4b[1]\n" ASM_PREFETCH("[%[b_ptr], #256]")
                        "udot v26.4s, %[b2].16b, %[a0].4b[2]\n"
                        "udot v27.4s, %[b2].16b, %[a0].4b[3]\n"
                        "udot v28.4s, %[b2].16b, %[a1].4b[0]\n"
                        "udot v29.4s, %[b2].16b, %[a1].4b[1]\n"
                        "udot v30.4s, %[b2].16b, %[a1].4b[2]\n"
                        "udot v31.4s, %[b2].16b, %[a1].4b[3]\n"
                        "ldr %q[b2], [%[b_ptr], #80]\n"

                        "udot v8.4s , %[b0].16b, %[a0a].4b[0]\n"
                        "udot v9.4s , %[b0].16b, %[a0a].4b[1]\n"
                        "ldr %q[a0], [%[a_ptr], #64]\n"
                        "udot v10.4s, %[b0].16b, %[a0a].4b[2]\n"
                        "udot v11.4s, %[b0].16b, %[a0a].4b[3]\n"
                        "udot v12.4s, %[b0].16b, %[a1a].4b[0]\n"
                        "ldr %q[a1], [%[a_ptr], #80]\n"
                        "udot v13.4s, %[b0].16b, %[a1a].4b[1]\n"
                        "udot v14.4s, %[b0].16b, %[a1a].4b[2]\n"
                        "udot v15.4s, %[b0].16b, %[a1a].4b[3]\n"
                        "ldr %q[b0], [%[b_ptr], #96]\n"

                        "udot v16.4s, %[b1].16b, %[a0a].4b[0]\n"
                        "udot v17.4s, %[b1].16b, %[a0a].4b[1]\n" ASM_PREFETCH("[%[b_ptr], #320]")
                        "udot v18.4s, %[b1].16b, %[a0a].4b[2]\n"
                        "udot v19.4s, %[b1].16b, %[a0a].4b[3]\n"
                        "udot v20.4s, %[b1].16b, %[a1a].4b[0]\n"
                        "udot v21.4s, %[b1].16b, %[a1a].4b[1]\n"
                        "udot v22.4s, %[b1].16b, %[a1a].4b[2]\n"
                        "udot v23.4s, %[b1].16b, %[a1a].4b[3]\n"
                        "ldr %q[b1], [%[b_ptr], #112]\n"

                        "udot v24.4s, %[b2].16b, %[a0a].4b[0]\n"
                        "udot v25.4s, %[b2].16b, %[a0a].4b[1]\n"
                        "add %[a_ptr], %[a_ptr], #64\n"
                        "udot v26.4s, %[b2].16b, %[a0a].4b[2]\n"
                        "udot v27.4s, %[b2].16b, %[a0a].4b[3]\n"
                        "add %[b_ptr], %[b_ptr], #96\n"
                        "udot v28.4s, %[b2].16b, %[a1a].4b[0]\n"
                        "udot v29.4s, %[b2].16b, %[a1a].4b[1]\n"
                        "subs %w[k], %w[k], #1\n"
                        "udot v30.4s, %[b2].16b, %[a1a].4b[2]\n"
                        "udot v31.4s, %[b2].16b, %[a1a].4b[3]\n"

                        "bne 1b\n"

                        "udot v8.4s , %[b0].16b, %[a0].4b[0]\n"
                        "udot v9.4s , %[b0].16b, %[a0].4b[1]\n"
                        "ldr %q[b2], [%[b_ptr], #32]\n"
                        "udot v10.4s, %[b0].16b, %[a0].4b[2]\n"
                        "udot v11.4s, %[b0].16b, %[a0].4b[3]\n"
                        "ldr %q[a0a], [%[a_ptr], #32]\n"
                        "udot v12.4s, %[b0].16b, %[a1].4b[0]\n"
                        "udot v13.4s, %[b0].16b, %[a1].4b[1]\n"
                        "ldr %q[a1a], [%[a_ptr], #48]\n"
                        "udot v14.4s, %[b0].16b, %[a1].4b[2]\n"
                        "udot v15.4s, %[b0].16b, %[a1].4b[3]\n"
                        "ldr %q[b0], [%[b_ptr], #48]\n"

                        "udot v16.4s, %[b1].16b, %[a0].4b[0]\n"
                        "udot v17.4s, %[b1].16b, %[a0].4b[1]\n"
                        "udot v18.4s, %[b1].16b, %[a0].4b[2]\n"
                        "udot v19.4s, %[b1].16b, %[a0].4b[3]\n"
                        "udot v20.4s, %[b1].16b, %[a1].4b[0]\n"
                        "udot v21.4s, %[b1].16b, %[a1].4b[1]\n"
                        "udot v22.4s, %[b1].16b, %[a1].4b[2]\n"
                        "udot v23.4s, %[b1].16b, %[a1].4b[3]\n"
                        "ldr %q[b1], [%[b_ptr], #64]\n"

                        "udot v24.4s, %[b2].16b, %[a0].4b[0]\n"
                        "udot v25.4s, %[b2].16b, %[a0].4b[1]\n"
                        "udot v26.4s, %[b2].16b, %[a0].4b[2]\n"
                        "udot v27.4s, %[b2].16b, %[a0].4b[3]\n"
                        "udot v28.4s, %[b2].16b, %[a1].4b[0]\n"
                        "udot v29.4s, %[b2].16b, %[a1].4b[1]\n"
                        "udot v30.4s, %[b2].16b, %[a1].4b[2]\n"
                        "udot v31.4s, %[b2].16b, %[a1].4b[3]\n"
                        "ldr %q[b2], [%[b_ptr], #80]\n"

                        "udot v8.4s , %[b0].16b, %[a0a].4b[0]\n" ASM_PREFETCH("[%[c_ptr0]]") "udot v9.4s , %[b0].16b, %[a0a].4b[1]\n" ASM_PREFETCH("[%[c_ptr1]]") "udot v10.4s, %[b0].16b, %[a0a].4b[2]\n"
                        ASM_PREFETCH("[%[c_ptr2]]") "udot v11.4s, %[b0].16b, %[a0a].4b[3]\n" ASM_PREFETCH("[%[c_ptr3]]") "udot v12.4s, %[b0].16b, %[a1a].4b[0]\n" ASM_PREFETCH("[%[c_ptr4]]")
                        "udot v13.4s, %[b0].16b, %[a1a].4b[1]\n" ASM_PREFETCH("[%[c_ptr5]]") "udot v14.4s, %[b0].16b, %[a1a].4b[2]\n" ASM_PREFETCH("[%[c_ptr6]]") "udot v15.4s, %[b0].16b, %[a1a].4b[3]\n"
                        ASM_PREFETCH("[%[c_ptr7]]")

                        "udot v16.4s, %[b1].16b, %[a0a].4b[0]\n" ASM_PREFETCH("[%[c_ptr0], #48]") "udot v17.4s, %[b1].16b, %[a0a].4b[1]\n" ASM_PREFETCH("[%[c_ptr1], #48]") "udot v18.4s, %[b1].16b, %[a0a].4b[2]\n"
                        ASM_PREFETCH("[%[c_ptr2], #48]") "udot v19.4s, %[b1].16b, %[a0a].4b[3]\n" ASM_PREFETCH("[%[c_ptr3], #48]") "udot v20.4s, %[b1].16b, %[a1a].4b[0]\n" ASM_PREFETCH("[%[c_ptr4], #48]")
                        "udot v21.4s, %[b1].16b, %[a1a].4b[1]\n" ASM_PREFETCH("[%[c_ptr5], #48]") "udot v22.4s, %[b1].16b, %[a1a].4b[2]\n" ASM_PREFETCH("[%[c_ptr6], #48]") "udot v23.4s, %[b1].16b, %[a1a].4b[3]\n"
                        ASM_PREFETCH("[%[c_ptr7], #48]")

                        "udot v24.4s, %[b2].16b, %[a0a].4b[0]\n"
                        "udot v25.4s, %[b2].16b, %[a0a].4b[1]\n"
                        "udot v26.4s, %[b2].16b, %[a0a].4b[2]\n"
                        "udot v27.4s, %[b2].16b, %[a0a].4b[3]\n"
                        "add %[b_ptr], %[b_ptr], #96\n"
                        "udot v28.4s, %[b2].16b, %[a1a].4b[0]\n"
                        "udot v29.4s, %[b2].16b, %[a1a].4b[1]\n"
                        "udot v30.4s, %[b2].16b, %[a1a].4b[2]\n"
                        "udot v31.4s, %[b2].16b, %[a1a].4b[3]\n"

                        // Clean up macro namespace
                        ".purgem udot\n"

                        :
                        [a_ptr] "+r"(a_ptr), [b_ptr] "+r"(b_ptr),
                        [a0] "+w"(a0), [a1] "+w"(a1), [a0a] "+w"(a0a), [a1a] "+w"(a1a),
                        [b0] "+w"(b0), [b1] "+w"(b1), [b2] "+w"(b2), [k] "+r"(k)
                        : [c_ptr0] "r"(c_ptr0), [c_ptr1] "r"(c_ptr1), [c_ptr2] "r"(c_ptr2), [c_ptr3] "r"(c_ptr3),
                        [c_ptr4] "r"(c_ptr4), [c_ptr5] "r"(c_ptr5), [c_ptr6] "r"(c_ptr6), [c_ptr7] "r"(c_ptr7)
                        : "x20", "x21");

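                    // Write the 8x12 result tile back, accumulating into the destination: each
                    // call handles one group of four output columns and advances the row pointers.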
                    stincpld(r00, r10, r20, r30, r40, r50, r60, r70, c_ptr0, c_ptr1, c_ptr2, c_ptr3, c_ptr4, c_ptr5, c_ptr6, c_ptr7);
                    stinc(r01, r11, r21, r31, r41, r51, r61, r71, c_ptr0, c_ptr1, c_ptr2, c_ptr3, c_ptr4, c_ptr5, c_ptr6, c_ptr7);
                    stinc(r02, r12, r22, r32, r42, r52, r62, r72, c_ptr0, c_ptr1, c_ptr2, c_ptr3, c_ptr4, c_ptr5, c_ptr6, c_ptr7);
                }
            }
        }
    }
}
} // namespace arm_compute
#endif /* ARM_COMPUTE_AARCH64_V8_2 */