blob: e100d9fe4639e8dccba2ad54128a4041d76af145 [file] [log] [blame]
Georgios Pinitas48b3ef82019-10-14 19:03:09 +01001/*
Renato Arantes36a75da2024-01-26 17:31:18 +00002 * Copyright (c) 2017-2021, 2024 Arm Limited.
Georgios Pinitas48b3ef82019-10-14 19:03:09 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25/* As some of the merges need these headers, but are all included in the
26 * arm_gemm namespace, put these headers here. */
27#include <algorithm>
Georgios Pinitas48b3ef82019-10-14 19:03:09 +010028
29#include <arm_neon.h>
30
31#include "arm_gemm.hpp"
32#include "asmlib.hpp"
Renato Arantes36a75da2024-01-26 17:31:18 +000033#include "bfloat.hpp"
Georgios Pinitas48b3ef82019-10-14 19:03:09 +010034#include "utils.hpp"
35
36namespace arm_gemm {
37
38template<unsigned int twidth, unsigned int height, bool sve=false, typename Tin, typename Tout>
39void MergeResults(Tout * out, const Tin * in, int ldc, int y0, int ymax, int x0, int xmax, const Tout *bias, Activation act, bool append) {
Michalis Spyrou20fca522021-06-07 14:23:57 +010040 // NOTE: The following code is disabled to avoid calling get_vector_length(), so templated MergeResults will not
41 // be correct for SVE cases. This is OK as we have specialisations for all needed SVE cases anyway.
42 //
Georgios Pinitas48b3ef82019-10-14 19:03:09 +010043 // For SVE cases, multiply the width up by the vector length.
44 // Use the *input* type to determine this, since this will be what the kernel operated on.
Michalis Spyrou20fca522021-06-07 14:23:57 +010045 // const int width = twidth * (sve ? get_vector_length<Tin>() : 1);
46 const int width = twidth;
Georgios Pinitas48b3ef82019-10-14 19:03:09 +010047
48 const int full_y_blocks = (ymax - y0) / height;
49 const int y_remainder = (ymax - y0) % height;
50 const int y_blocks = full_y_blocks + (y_remainder ? 1 : 0);
51
52 const int full_x_blocks = (xmax - x0) / width;
53 const int x_remainder = (xmax - x0) % width;
54 const int x_blocks = full_x_blocks + (x_remainder ? 1 : 0);
55
56 for (int y_block = 0; y_block < y_blocks; y_block++) {
57 int ybase = y0 + (y_block * height);
58
59 int fill_rows = (y_block < full_y_blocks) ? height : y_remainder;
60
61 for (int x_block = 0; x_block < x_blocks; x_block++) {
62 int xbase = x0 + (x_block * width);
63
64 int fill_cols = (x_block < full_x_blocks) ? width : x_remainder;
65
66 for (int row=0; row < fill_rows; row++) {
67 for (int col=0; col < fill_cols; col++) {
68 Tout &r = out[(ybase + row) * ldc + xbase + col];
69 Tout v = in[row * width + col];
70
71 if (append) {
72 v += r;
73 }
74
75 if (bias) {
76 v += bias[xbase + col];
77 }
78
79 switch(act.type) {
80 default:
81 case Activation::Type::None:
82 break;
83
84 case Activation::Type::ReLU:
85 v = std::max(v, static_cast<Tout>(0));
86 break;
87
88 case Activation::Type::BoundedReLU:
89 v = std::max(std::min(v, static_cast<Tout>(act.param1)), static_cast<Tout>(0));
90 break;
91 }
92
93 r = v;
94 }
95 }
96
97 in += (width * height);
98 }
99 }
100}
101
/* Pull in the generated/optimized merge specialisations. */
#include "merges/list.hpp"

/* Cortex-A53 8x6 SGEMM kernel uses a templated merge as the optimized merge
 * generator cannot cope with the width (6) not being a multiple of VL (4). */
#ifdef __aarch64__
template void MergeResults<6u, 8u, false, float, float>(float *, float const*, int, int, int, int, int, float const *, Activation, bool);
#endif

/* Explicit instantiation for merging float accumulators to __fp16 output on
 * AArch64 (only when __fp16 argument passing is available). */
#if defined(__aarch64__) && defined(__ARM_FP16_ARGS)
template void MergeResults<12u, 8u, false, float, __fp16>(__fp16*, float const*, int, int, int, int, int, __fp16 const*, Activation, bool);
#endif

/* Same float -> __fp16 merge for 32-bit Arm, with its 8x6 tile shape. */
#if defined(__arm__) && defined(__ARM_FP16_ARGS)
template void MergeResults<8u, 6u, false, float, __fp16>(__fp16*, float const*, int, int, int, int, int, __fp16 const*, Activation, bool);
#endif

/* float -> bfloat16 merge for 32-bit Arm when BF16 support is enabled. */
#if defined(__arm__) && defined(ARM_COMPUTE_ENABLE_BF16)
template void MergeResults<8u, 6u, false, float, bfloat16>(bfloat16*, float const*, int, int, int, int, int, bfloat16 const*, Activation, bool);
#endif
121
Georgios Pinitas48b3ef82019-10-14 19:03:09 +0100122} // namespace arm_gemm