/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/IAccessWindow.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "support/ToolchainSupport.h"

#include <arm_neon.h>
#include <cstddef>
#include <cstdint>

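// Helper macros emitting AArch64 PRFM prefetch instructions: PLD*/PST* select
// prefetch-for-load vs prefetch-for-store, L1/L2 select the target cache
// level, and KEEP requests the temporal (retain-in-cache) policy.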
#define ASM_PREFETCH(address) "PRFM PLDL1KEEP, " address "\n"
#define ASM_PREFETCHL2(address) "PRFM PLDL2KEEP, " address "\n"
#define ASM_PREFETCHW(address) "PRFM PSTL1KEEP, " address "\n"
#define ASM_PREFETCHWL2(address) "PRFM PSTL2KEEP, " address "\n"

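// Loads a 128-bit vector from each of the eight output pointers, adds the
// corresponding accumulator vector, stores the result back, and post-increments
// each pointer by 16 bytes. This variant also prefetches 80 bytes ahead of
// every pointer so the upcoming output cache lines are already on their way.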
static inline void stincpld(uint32x4_t v0, uint32x4_t v1, uint32x4_t v2, uint32x4_t v3,
                            uint32x4_t v4, uint32x4_t v5, uint32x4_t v6, uint32x4_t v7,
                            uint32_t *&ptr0, uint32_t *&ptr1, uint32_t *&ptr2, uint32_t *&ptr3,
                            uint32_t *&ptr4, uint32_t *&ptr5, uint32_t *&ptr6, uint32_t *&ptr7)
{
    __asm __volatile(
        "LDR q0, [%[ptr0]]\n"
        "LDR q1, [%[ptr1]]\n"
        "LDR q2, [%[ptr2]]\n"
        "LDR q3, [%[ptr3]]\n"
        "LDR q4, [%[ptr4]]\n"
        "LDR q5, [%[ptr5]]\n"
        "LDR q6, [%[ptr6]]\n"
        "LDR q7, [%[ptr7]]\n"
        "ADD v0.4s, v0.4s, %[v0].4s\n" ASM_PREFETCH("[%[ptr0], #80]")
        "ADD v1.4s, v1.4s, %[v1].4s\n" ASM_PREFETCH("[%[ptr1], #80]")
        "ADD v2.4s, v2.4s, %[v2].4s\n" ASM_PREFETCH("[%[ptr2], #80]")
        "ADD v3.4s, v3.4s, %[v3].4s\n" ASM_PREFETCH("[%[ptr3], #80]")
        "ADD v4.4s, v4.4s, %[v4].4s\n" ASM_PREFETCH("[%[ptr4], #80]")
        "ADD v5.4s, v5.4s, %[v5].4s\n" ASM_PREFETCH("[%[ptr5], #80]")
        "ADD v6.4s, v6.4s, %[v6].4s\n" ASM_PREFETCH("[%[ptr6], #80]")
        "ADD v7.4s, v7.4s, %[v7].4s\n" ASM_PREFETCH("[%[ptr7], #80]")
        "STR q0, [%[ptr0]], #16\n"
        "STR q1, [%[ptr1]], #16\n"
        "STR q2, [%[ptr2]], #16\n"
        "STR q3, [%[ptr3]], #16\n"
        "STR q4, [%[ptr4]], #16\n"
        "STR q5, [%[ptr5]], #16\n"
        "STR q6, [%[ptr6]], #16\n"
        "STR q7, [%[ptr7]], #16\n"
        : [ptr0] "+r"(ptr0), [ptr1] "+r"(ptr1), [ptr2] "+r"(ptr2), [ptr3] "+r"(ptr3),
          [ptr4] "+r"(ptr4), [ptr5] "+r"(ptr5), [ptr6] "+r"(ptr6), [ptr7] "+r"(ptr7)
        : [v0] "w"(v0), [v1] "w"(v1), [v2] "w"(v2), [v3] "w"(v3),
          [v4] "w"(v4), [v5] "w"(v5), [v6] "w"(v6), [v7] "w"(v7)
        : "x20", "x21", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "memory");
}

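// Same read-accumulate-write-increment step as stincpld, but without the
// prefetches; used for the second and third column blocks of a tile.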
static inline void stinc(uint32x4_t v0, uint32x4_t v1, uint32x4_t v2, uint32x4_t v3,
                         uint32x4_t v4, uint32x4_t v5, uint32x4_t v6, uint32x4_t v7,
                         uint32_t *&ptr0, uint32_t *&ptr1, uint32_t *&ptr2, uint32_t *&ptr3,
                         uint32_t *&ptr4, uint32_t *&ptr5, uint32_t *&ptr6, uint32_t *&ptr7)
{
    __asm __volatile(
        "LDR q0, [%[ptr0]]\n"
        "LDR q1, [%[ptr1]]\n"
        "LDR q2, [%[ptr2]]\n"
        "LDR q3, [%[ptr3]]\n"
        "LDR q4, [%[ptr4]]\n"
        "LDR q5, [%[ptr5]]\n"
        "LDR q6, [%[ptr6]]\n"
        "LDR q7, [%[ptr7]]\n"
        "ADD v0.4s, v0.4s, %[v0].4s\n"
        "ADD v1.4s, v1.4s, %[v1].4s\n"
        "ADD v2.4s, v2.4s, %[v2].4s\n"
        "ADD v3.4s, v3.4s, %[v3].4s\n"
        "ADD v4.4s, v4.4s, %[v4].4s\n"
        "ADD v5.4s, v5.4s, %[v5].4s\n"
        "ADD v6.4s, v6.4s, %[v6].4s\n"
        "ADD v7.4s, v7.4s, %[v7].4s\n"
        "STR q0, [%[ptr0]], #16\n"
        "STR q1, [%[ptr1]], #16\n"
        "STR q2, [%[ptr2]], #16\n"
        "STR q3, [%[ptr3]], #16\n"
        "STR q4, [%[ptr4]], #16\n"
        "STR q5, [%[ptr5]], #16\n"
        "STR q6, [%[ptr6]], #16\n"
        "STR q7, [%[ptr7]], #16\n"
        : [ptr0] "+r"(ptr0), [ptr1] "+r"(ptr1), [ptr2] "+r"(ptr2), [ptr3] "+r"(ptr3),
          [ptr4] "+r"(ptr4), [ptr5] "+r"(ptr5), [ptr6] "+r"(ptr6), [ptr7] "+r"(ptr7)
        : [v0] "w"(v0), [v1] "w"(v1), [v2] "w"(v2), [v3] "w"(v3),
          [v4] "w"(v4), [v5] "w"(v5), [v6] "w"(v6), [v7] "w"(v7)
        : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "memory");
}

namespace arm_compute
{
void NEGEMMLowpAArch64V8P4Kernel::internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::U8);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);

    _input0 = input0;
    _input1 = input1;
    _output = output;

    // Configure kernel window
    Window win = calculate_max_window(*output->info());

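    // The inner kernel writes the output in 8 (rows) x 12 (columns) tiles and
    // reads input0 in multiples of 8 and input1 in multiples of 12 along x, so
    // the access windows must be padded up to those block sizes.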
    AccessWindowRectangle output_access(output->info(), 0, 0, 12, 8);

    const int input0_access_end = ceil_to_multiple(input0->info()->tensor_shape().x(), 8);
    const int input1_access_end = ceil_to_multiple(input1->info()->tensor_shape().x(), 12);

    update_window_and_padding(win,
                              AccessWindowStatic(input0->info(), 0, 0, input0_access_end, input0->info()->tensor_shape().y()),
                              AccessWindowStatic(input1->info(), 0, 0, input1_access_end, input1->info()->tensor_shape().y()),
                              output_access);

    INEKernel::configure(win);
}

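// run() below always behaves as a single thread (nthreads is fixed to 1), so
// the scheduler must not split the window across workers.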
bool NEGEMMLowpAArch64V8P4Kernel::is_parallelisable() const
{
    return false;
}

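// Assemblers without ARMv8.2-A dot-product support cannot accept the UDOT
// mnemonic, so this assembler macro parses the operand strings itself and
// emits the instruction as a raw word: UDOT Vd.4S, Vn.16B, Vm.4B[idx] is the
// base opcode 0x6f80e000 with the register numbers OR-ed into the Rd, Rn and
// Rm fields and the lane index split across the H (bit 11) and L (bit 21)
// bits, exactly as the .int expression at the end of the macro spells out.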
#define _UDOT_MACRO \
    ".altmacro\n" \
    ".macro udot opd:req, opn:req, opm:req\n" \
    "local vd, vn, vm, h, l\n" \
    ".irp reg,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31\n" \
    ".ifeqs \"\\opd\",\"v\\reg\\.4s\"\n" \
    ".set vd,\\reg\n" \
    ".endif\n" \
    ".ifeqs \"\\opn\",\"v\\reg\\.16b\"\n" \
    ".set vn,\\reg\n" \
    ".endif\n" \
    ".irp idx,0,1,2,3\n" \
    ".ifeqs \"\\opm\",\"v\\reg\\.4b[\\idx\\]\"\n" \
    ".set vm,\\reg\n" \
    ".set h,\\idx / 2\n" \
    ".set l,\\idx %% 2\n" \
    ".endif\n" \
    ".endr\n" \
    ".endr\n" \
    ".ifndef vd\n" \
    ".error \"Bad operand \\opd\"\n" \
    ".exitm\n" \
    ".endif\n" \
    ".ifndef vn\n" \
    ".error \"Bad operand \\opn\"\n" \
    ".exitm\n" \
    ".endif\n" \
    ".ifndef vm\n" \
    ".error \"Bad operand \\opm\"\n" \
    ".exitm\n" \
    ".endif\n" \
    ".ifndef h\n" \
    ".error \"Bad operand \\opm\"\n" \
    ".exitm\n" \
    ".endif\n" \
    ".ifndef l\n" \
    ".error \"Bad operand \\opm\"\n" \
    ".exitm\n" \
    ".endif\n" \
    ".int 0x6f80e000 | vd | (vn << 5) | (vm << 16) | (l << 21) | (h << 11)\n" \
    ".endm\n"

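// Prefetches the next cache lines of the current A and B panels ahead of the
// main loop. The empty asm statements that follow emit no instructions: their
// "+w"/"=w" constraints appear intended to pin the accumulator and operand
// variables to registers and keep them live across the prefetches.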
#define _PREFETCH_ \
    __asm __volatile( \
        "" ASM_PREFETCH("[%[a_ptr], #64]") \
        ASM_PREFETCH("[%[a_ptr], #128]") \
        ASM_PREFETCH("[%[a_ptr], #192]") \
        : \
        : \
        [a_ptr] "r"(a_ptr), [b_ptr] "r"(b_ptr) \
        : "x20", "x21", "memory"); \
    __asm __volatile( \
        "" ASM_PREFETCH("[%[b_ptr]]") \
        ASM_PREFETCH("[%[b_ptr], #64]") \
        ASM_PREFETCH("[%[b_ptr], #128]") \
        ASM_PREFETCH("[%[b_ptr], #192]") \
        : \
        : \
        [b_ptr] "r"(b_ptr) \
        : "x20", "x21"); \
    __asm __volatile( \
        "" \
        : [r00] "+w"(r00), [r01] "+w"(r01), \
          [r10] "+w"(r10), [r11] "+w"(r11), \
          [r20] "+w"(r20), [r21] "+w"(r21), \
          [r30] "+w"(r30), [r31] "+w"(r31), \
          [a0] "+w"(a0), [a1] "+w"(a1), \
          [b0] "+w"(b0), [b1] "+w"(b1), [b2] "=w"(b2), \
          [a_ptr] "+r"(a_ptr), [b_ptr] "+r"(b_ptr) \
        : \
        :); \
    __asm __volatile( \
        "" \
        : [r02] "+w"(r02), \
          [r12] "+w"(r12), \
          [r22] "+w"(r22), \
          [r32] "+w"(r32), \
          [r40] "+w"(r40), \
          [r50] "+w"(r50), \
          [r60] "+w"(r60), \
          [r70] "+w"(r70), \
          [a0a] "=w"(a0a), [a1a] "=w"(a1a), \
          [b0] "+w"(b0), [b2] "+w"(b2), [b5] "=&w"(b5) \
        : \
        :); \
    __asm __volatile( \
        "" \
        : \
        [r41] "+w"(r41), [r42] "+w"(r42), \
        [r51] "+w"(r51), [r52] "+w"(r52), \
        [r61] "+w"(r61), [r62] "+w"(r62), \
        [r71] "+w"(r71), [r72] "+w"(r72), \
        [a1] "+w"(a1), \
        [b0] "+w"(b0), [b1] "+w"(b1), [b2] "+w"(b2), \
        [b_ptr] "+r"(b_ptr), [k] "+r"(k) \
        : \
        :);

void NEGEMMLowpAArch64V8P4Kernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);

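    // Cache blocking parameters: the output is processed in panels of up to
    // x_block columns by k_block accumulation steps. input0 holds A
    // pre-interleaved in blocks of 8 rows, so its x extent is 8 * K and the
    // K dimension is recovered with the >> 3 below.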
    const int x_block  = 348;
    const int k_block  = 1664;
    const int nthreads = 1;
    const int M        = _output->info()->tensor_shape().y();
    const int N        = _output->info()->tensor_shape().x();
    const int K        = _input0->info()->tensor_shape().x() >> 3;

    int yblocksperthread = ((M / nthreads) + 7) / 8;

    if(yblocksperthread < 1)
    {
        yblocksperthread = 1;
    }

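    // Leading dimensions: lda/ldb/ldc are row strides in bytes, ldc2 is the
    // output element stride in bytes, and ldc3 is the output row stride
    // converted to uint32_t elements for pointer arithmetic.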
    const int lda  = _input0->info()->strides_in_bytes().y();
    const int ldb  = _input1->info()->strides_in_bytes().y();
    const int ldc  = _output->info()->strides_in_bytes().y();
    const int ldc2 = _output->info()->strides_in_bytes().x();
    const int ldc3 = ldc / sizeof(uint32_t);

    const int threadid = 0;
    int       y0       = threadid * yblocksperthread * 8;
    int       ymax     = (threadid + 1) * yblocksperthread * 8;
    if(y0 >= M)
    {
        return;
    }
    if(ymax > M)
    {
        ymax = M;
    }
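    // Walk the output in k_block x x_block panels so each panel's A and B data
    // stays resident in cache while it is reused.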
    for(int k0 = 0; k0 < K; k0 += k_block)
    {
        int kmax = k0 + k_block;
        if(kmax > K)
        {
            kmax = K;
        }

        for(int x0 = 0; x0 < N; x0 += x_block)
        {
            int xmax = x0 + x_block;
            if(xmax > N)
            {
                xmax = N;
            }

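            // Each y iteration produces an 8-row stripe of C; c_ptr0..c_ptr7
            // track the current write position in rows y..y+7.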
            for(int y = y0; y < ymax; y += 8)
            {
                auto      c_ptr0 = reinterpret_cast<uint32_t *>(_output->buffer() + (y * ldc) + x0 * ldc2);
                uint32_t *c_ptr1 = c_ptr0 + ldc3;
                uint32_t *c_ptr2 = c_ptr1 + ldc3;
                uint32_t *c_ptr3 = c_ptr2 + ldc3;
                uint32_t *c_ptr4 = c_ptr3 + ldc3;
                uint32_t *c_ptr5 = c_ptr4 + ldc3;
                uint32_t *c_ptr6 = c_ptr5 + ldc3;
                uint32_t *c_ptr7 = c_ptr6 + ldc3;

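                // Warm the cache with the first line of each output row before
                // entering the inner loop.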
                __asm __volatile(
                    "" ASM_PREFETCH("[%[c_ptr0]]")
                    ASM_PREFETCH("[%[c_ptr1]]")
                    ASM_PREFETCH("[%[c_ptr2]]")
                    ASM_PREFETCH("[%[c_ptr3]]")
                    ASM_PREFETCH("[%[c_ptr4]]")
                    ASM_PREFETCH("[%[c_ptr5]]")
                    ASM_PREFETCH("[%[c_ptr6]]")
                    ASM_PREFETCH("[%[c_ptr7]]")
                    :
                    : [c_ptr0] "r"(c_ptr0), [c_ptr1] "r"(c_ptr1), [c_ptr2] "r"(c_ptr2), [c_ptr3] "r"(c_ptr3),
                      [c_ptr4] "r"(c_ptr4), [c_ptr5] "r"(c_ptr5), [c_ptr6] "r"(c_ptr6), [c_ptr7] "r"(c_ptr7)
                    : "x20", "x21");

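                // Registers v8-v31 hold the 8x12 uint32 accumulator tile: rNb
                // is row N, column block b, each vector covering four columns.
                // a0/a1 carry four k-bytes for rows 0-3 and 4-7, the *a copies
                // double-buffer the next k-step, and b5 is scratch.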
                for(int x = x0; x < xmax; x += 12)
                {
                    register uint32x4_t r00 asm("v8");
                    register uint32x4_t r10 asm("v9");
                    register uint32x4_t r20 asm("v10");
                    register uint32x4_t r30 asm("v11");
                    register uint32x4_t r40 asm("v12");
                    register uint32x4_t r50 asm("v13");
                    register uint32x4_t r60 asm("v14");
                    register uint32x4_t r70 asm("v15");
                    register uint32x4_t r01 asm("v16");
                    register uint32x4_t r11 asm("v17");
                    register uint32x4_t r21 asm("v18");
                    register uint32x4_t r31 asm("v19");
                    register uint32x4_t r41 asm("v20");
                    register uint32x4_t r51 asm("v21");
                    register uint32x4_t r61 asm("v22");
                    register uint32x4_t r71 asm("v23");
                    register uint32x4_t r02 asm("v24");
                    register uint32x4_t r12 asm("v25");
                    register uint32x4_t r22 asm("v26");
                    register uint32x4_t r32 asm("v27");
                    register uint32x4_t r42 asm("v28");
                    register uint32x4_t r52 asm("v29");
                    register uint32x4_t r62 asm("v30");
                    register uint32x4_t r72 asm("v31");

                    register uint8x16_t a0 asm("v0");
                    register uint8x16_t a1 asm("v1");
                    register uint8x16_t b0 asm("v2");
                    register uint8x16_t b1 asm("v3");
                    register uint8x16_t b2 asm("v4");
                    register uint8x16_t a0a asm("v5");
                    register uint8x16_t a1a asm("v6");
                    register uint8x16_t b5 asm("v7");
                    const uint8_t      *a_ptr = _input0->buffer() + ((y / 8) * lda) + (k0 * 8);
                    const uint8_t      *b_ptr = _input1->buffer() + ((x / 12) * ldb) + (k0 * 12);

                    r00 = r01 = r02 = r10 = r11 = r12 = r20 = r21 = r22 = r30 = r31 = r32 = vdupq_n_u32(0);
                    r40 = r41 = r42 = r50 = r51 = r52 = r60 = r61 = r62 = r70 = r71 = r72 = vdupq_n_u32(0);

                    int k = ((kmax - k0) / 8) - 1;

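                    // Preload the first A and B vectors; the main loop is
                    // unrolled over two 4-byte k-steps (8 k-values per pass),
                    // and the final pass is peeled off after the branch so it
                    // can prefetch the output instead of more input, hence
                    // the "- 1" in the trip count above.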
                    a0 = vld1q_u8(a_ptr);
                    b0 = vld1q_u8(b_ptr);
                    a1 = vld1q_u8(a_ptr + 16);
                    b1 = vld1q_u8(b_ptr + 16);

                    _PREFETCH_

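                    // Main accumulation loop: each half-iteration issues 24
                    // UDOTs (8 rows x 3 column blocks) while the loads for the
                    // next k-step run in parallel.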
                    __asm __volatile(
                        _UDOT_MACRO
                        "1:\n"
                        "udot v8.4s , %[b0].16b, %[a0].4b[0]\n"
                        "udot v9.4s , %[b0].16b, %[a0].4b[1]\n"
                        "ldr %q[b2], [%[b_ptr], #32]\n"
                        "udot v10.4s, %[b0].16b, %[a0].4b[2]\n"
                        "udot v11.4s, %[b0].16b, %[a0].4b[3]\n"
                        "ldr %q[a0a], [%[a_ptr], #32]\n"
                        "udot v12.4s, %[b0].16b, %[a1].4b[0]\n"
                        "udot v13.4s, %[b0].16b, %[a1].4b[1]\n"
                        "ldr %q[a1a], [%[a_ptr], #48]\n"
                        "udot v14.4s, %[b0].16b, %[a1].4b[2]\n"
                        "udot v15.4s, %[b0].16b, %[a1].4b[3]\n"
                        "ldr %q[b0], [%[b_ptr], #48]\n"

                        "udot v16.4s, %[b1].16b, %[a0].4b[0]\n"
                        "udot v17.4s, %[b1].16b, %[a0].4b[1]\n" ASM_PREFETCH("[%[a_ptr], #256]")
                        "udot v18.4s, %[b1].16b, %[a0].4b[2]\n"
                        "udot v19.4s, %[b1].16b, %[a0].4b[3]\n"
                        "udot v20.4s, %[b1].16b, %[a1].4b[0]\n"
                        "udot v21.4s, %[b1].16b, %[a1].4b[1]\n"
                        "udot v22.4s, %[b1].16b, %[a1].4b[2]\n"
                        "udot v23.4s, %[b1].16b, %[a1].4b[3]\n"
                        "ldr %q[b1], [%[b_ptr], #64]\n"

                        "udot v24.4s, %[b2].16b, %[a0].4b[0]\n"
                        "udot v25.4s, %[b2].16b, %[a0].4b[1]\n" ASM_PREFETCH("[%[b_ptr], #256]")
                        "udot v26.4s, %[b2].16b, %[a0].4b[2]\n"
                        "udot v27.4s, %[b2].16b, %[a0].4b[3]\n"
                        "udot v28.4s, %[b2].16b, %[a1].4b[0]\n"
                        "udot v29.4s, %[b2].16b, %[a1].4b[1]\n"
                        "udot v30.4s, %[b2].16b, %[a1].4b[2]\n"
                        "udot v31.4s, %[b2].16b, %[a1].4b[3]\n"
                        "ldr %q[b2], [%[b_ptr], #80]\n"

                        "udot v8.4s , %[b0].16b, %[a0a].4b[0]\n"
                        "udot v9.4s , %[b0].16b, %[a0a].4b[1]\n"
                        "ldr %q[a0], [%[a_ptr], #64]\n"
                        "udot v10.4s, %[b0].16b, %[a0a].4b[2]\n"
                        "udot v11.4s, %[b0].16b, %[a0a].4b[3]\n"
                        "udot v12.4s, %[b0].16b, %[a1a].4b[0]\n"
                        "ldr %q[a1], [%[a_ptr], #80]\n"
                        "udot v13.4s, %[b0].16b, %[a1a].4b[1]\n"
                        "udot v14.4s, %[b0].16b, %[a1a].4b[2]\n"
                        "udot v15.4s, %[b0].16b, %[a1a].4b[3]\n"
                        "ldr %q[b0], [%[b_ptr], #96]\n"

                        "udot v16.4s, %[b1].16b, %[a0a].4b[0]\n"
                        "udot v17.4s, %[b1].16b, %[a0a].4b[1]\n" ASM_PREFETCH("[%[b_ptr], #320]")
                        "udot v18.4s, %[b1].16b, %[a0a].4b[2]\n"
                        "udot v19.4s, %[b1].16b, %[a0a].4b[3]\n"
                        "udot v20.4s, %[b1].16b, %[a1a].4b[0]\n"
                        "udot v21.4s, %[b1].16b, %[a1a].4b[1]\n"
                        "udot v22.4s, %[b1].16b, %[a1a].4b[2]\n"
                        "udot v23.4s, %[b1].16b, %[a1a].4b[3]\n"
                        "ldr %q[b1], [%[b_ptr], #112]\n"

                        "udot v24.4s, %[b2].16b, %[a0a].4b[0]\n"
                        "udot v25.4s, %[b2].16b, %[a0a].4b[1]\n"
                        "add %[a_ptr], %[a_ptr], #64\n"
                        "udot v26.4s, %[b2].16b, %[a0a].4b[2]\n"
                        "udot v27.4s, %[b2].16b, %[a0a].4b[3]\n"
                        "add %[b_ptr], %[b_ptr], #96\n"
                        "udot v28.4s, %[b2].16b, %[a1a].4b[0]\n"
                        "udot v29.4s, %[b2].16b, %[a1a].4b[1]\n"
                        "subs %w[k], %w[k], #1\n"
                        "udot v30.4s, %[b2].16b, %[a1a].4b[2]\n"
                        "udot v31.4s, %[b2].16b, %[a1a].4b[3]\n"

                        "bne 1b\n"

                        "udot v8.4s , %[b0].16b, %[a0].4b[0]\n"
                        "udot v9.4s , %[b0].16b, %[a0].4b[1]\n"
                        "ldr %q[b2], [%[b_ptr], #32]\n"
                        "udot v10.4s, %[b0].16b, %[a0].4b[2]\n"
                        "udot v11.4s, %[b0].16b, %[a0].4b[3]\n"
                        "ldr %q[a0a], [%[a_ptr], #32]\n"
                        "udot v12.4s, %[b0].16b, %[a1].4b[0]\n"
                        "udot v13.4s, %[b0].16b, %[a1].4b[1]\n"
                        "ldr %q[a1a], [%[a_ptr], #48]\n"
                        "udot v14.4s, %[b0].16b, %[a1].4b[2]\n"
                        "udot v15.4s, %[b0].16b, %[a1].4b[3]\n"
                        "ldr %q[b0], [%[b_ptr], #48]\n"

                        "udot v16.4s, %[b1].16b, %[a0].4b[0]\n"
                        "udot v17.4s, %[b1].16b, %[a0].4b[1]\n"
                        "udot v18.4s, %[b1].16b, %[a0].4b[2]\n"
                        "udot v19.4s, %[b1].16b, %[a0].4b[3]\n"
                        "udot v20.4s, %[b1].16b, %[a1].4b[0]\n"
                        "udot v21.4s, %[b1].16b, %[a1].4b[1]\n"
                        "udot v22.4s, %[b1].16b, %[a1].4b[2]\n"
                        "udot v23.4s, %[b1].16b, %[a1].4b[3]\n"
                        "ldr %q[b1], [%[b_ptr], #64]\n"

                        "udot v24.4s, %[b2].16b, %[a0].4b[0]\n"
                        "udot v25.4s, %[b2].16b, %[a0].4b[1]\n"
                        "udot v26.4s, %[b2].16b, %[a0].4b[2]\n"
                        "udot v27.4s, %[b2].16b, %[a0].4b[3]\n"
                        "udot v28.4s, %[b2].16b, %[a1].4b[0]\n"
                        "udot v29.4s, %[b2].16b, %[a1].4b[1]\n"
                        "udot v30.4s, %[b2].16b, %[a1].4b[2]\n"
                        "udot v31.4s, %[b2].16b, %[a1].4b[3]\n"
                        "ldr %q[b2], [%[b_ptr], #80]\n"

                        "udot v8.4s , %[b0].16b, %[a0a].4b[0]\n" ASM_PREFETCH("[%[c_ptr0]]")
                        "udot v9.4s , %[b0].16b, %[a0a].4b[1]\n" ASM_PREFETCH("[%[c_ptr1]]")
                        "udot v10.4s, %[b0].16b, %[a0a].4b[2]\n" ASM_PREFETCH("[%[c_ptr2]]")
                        "udot v11.4s, %[b0].16b, %[a0a].4b[3]\n" ASM_PREFETCH("[%[c_ptr3]]")
                        "udot v12.4s, %[b0].16b, %[a1a].4b[0]\n" ASM_PREFETCH("[%[c_ptr4]]")
                        "udot v13.4s, %[b0].16b, %[a1a].4b[1]\n" ASM_PREFETCH("[%[c_ptr5]]")
                        "udot v14.4s, %[b0].16b, %[a1a].4b[2]\n" ASM_PREFETCH("[%[c_ptr6]]")
                        "udot v15.4s, %[b0].16b, %[a1a].4b[3]\n" ASM_PREFETCH("[%[c_ptr7]]")

                        "udot v16.4s, %[b1].16b, %[a0a].4b[0]\n" ASM_PREFETCH("[%[c_ptr0], #48]")
                        "udot v17.4s, %[b1].16b, %[a0a].4b[1]\n" ASM_PREFETCH("[%[c_ptr1], #48]")
                        "udot v18.4s, %[b1].16b, %[a0a].4b[2]\n" ASM_PREFETCH("[%[c_ptr2], #48]")
                        "udot v19.4s, %[b1].16b, %[a0a].4b[3]\n" ASM_PREFETCH("[%[c_ptr3], #48]")
                        "udot v20.4s, %[b1].16b, %[a1a].4b[0]\n" ASM_PREFETCH("[%[c_ptr4], #48]")
                        "udot v21.4s, %[b1].16b, %[a1a].4b[1]\n" ASM_PREFETCH("[%[c_ptr5], #48]")
                        "udot v22.4s, %[b1].16b, %[a1a].4b[2]\n" ASM_PREFETCH("[%[c_ptr6], #48]")
                        "udot v23.4s, %[b1].16b, %[a1a].4b[3]\n" ASM_PREFETCH("[%[c_ptr7], #48]")

                        "udot v24.4s, %[b2].16b, %[a0a].4b[0]\n"
                        "udot v25.4s, %[b2].16b, %[a0a].4b[1]\n"
                        "udot v26.4s, %[b2].16b, %[a0a].4b[2]\n"
                        "udot v27.4s, %[b2].16b, %[a0a].4b[3]\n"
                        "add %[b_ptr], %[b_ptr], #96\n"
                        "udot v28.4s, %[b2].16b, %[a1a].4b[0]\n"
                        "udot v29.4s, %[b2].16b, %[a1a].4b[1]\n"
                        "udot v30.4s, %[b2].16b, %[a1a].4b[2]\n"
                        "udot v31.4s, %[b2].16b, %[a1a].4b[3]\n"

                        // Clean up macro namespace
                        ".purgem udot\n"

                        :
                        [a_ptr] "+r"(a_ptr), [b_ptr] "+r"(b_ptr),
                        [a0] "+w"(a0), [a1] "+w"(a1), [a0a] "+w"(a0a), [a1a] "+w"(a1a),
                        [b0] "+w"(b0), [b1] "+w"(b1), [b2] "+w"(b2), [k] "+r"(k)
                        : [c_ptr0] "r"(c_ptr0), [c_ptr1] "r"(c_ptr1), [c_ptr2] "r"(c_ptr2), [c_ptr3] "r"(c_ptr3),
                          [c_ptr4] "r"(c_ptr4), [c_ptr5] "r"(c_ptr5), [c_ptr6] "r"(c_ptr6), [c_ptr7] "r"(c_ptr7)
                        : "x20", "x21");

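                    // Add the finished tile into C, one 4-column block across
                    // all 8 rows per call; each call advances the row pointers
                    // by 16 bytes.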
                    stincpld(r00, r10, r20, r30, r40, r50, r60, r70, c_ptr0, c_ptr1, c_ptr2, c_ptr3, c_ptr4, c_ptr5, c_ptr6, c_ptr7);
                    stinc(r01, r11, r21, r31, r41, r51, r61, r71, c_ptr0, c_ptr1, c_ptr2, c_ptr3, c_ptr4, c_ptr5, c_ptr6, c_ptr7);
                    stinc(r02, r12, r22, r32, r42, r52, r62, r72, c_ptr0, c_ptr1, c_ptr2, c_ptr3, c_ptr4, c_ptr5, c_ptr6, c_ptr7);
                }
            }
        }
    }
}
} // namespace arm_compute