/* Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/NEON/kernels/NEGEMMAssemblyBaseKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
#include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64A53Kernel.h"
#include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.h"
#include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "support/ToolchainSupport.h"

namespace arm_compute
{
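// These assembly GEMM headers are header-only templates; including them inside the
// arm_compute namespace keeps their symbols scoped to the library.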
#include "arm_compute/core/NEON/kernels/assembly/gemm_interleaved.hpp"
#include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s16_12x8.hpp"
#include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8.hpp"
#include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_4x4.hpp"
#include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u16_12x8.hpp"
#include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u8_4x4.hpp"
} // namespace arm_compute

using namespace arm_compute;

NEGEMMLowpAssemblyMatrixMultiplyCore::NEGEMMLowpAssemblyMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)), _mm_kernel(nullptr), _mtx_a_reshape_kernel(nullptr), _mtx_b_reshape_kernel(nullptr), _tmp_a(), _tmp_b(), _workspace()
{
}

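// Selects and configures the matrix multiply kernel for the current target: an optimized
// AArch64 assembly kernel where one is available, otherwise the generic NEON pipeline.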
void NEGEMMLowpAssemblyMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b, ITensor *output)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::U8, DataType::S8);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32, DataType::S32);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
    ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(0) != (b)->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
    ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(1) != (output)->info()->dimension(1), "The output matrix must have the same number of rows as the matrix A");
    ARM_COMPUTE_ERROR_ON_MSG((b)->info()->dimension(0) != (output)->info()->dimension(0), "The output matrix must have the same number of columns as the matrix B");

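    // On AArch64 builds, gather the GEMM problem dimensions (output is M x N, A is M x K)
    // and the CPU model so the assembly kernel and its workspace can be sized below.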
#ifdef __aarch64__
    const int            M                   = output->info()->tensor_shape().y();
    const int            N                   = output->info()->tensor_shape().x();
    const int            K                   = a->info()->tensor_shape().x();
    constexpr size_t     workspace_alignment = 4096;
    const struct CPUInfo ci                  = NEScheduler::get().cpu_info();
#endif /* __aarch64__ */

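    // Armv8.2-A builds: pick a dot-product assembly kernel based on the detected CPU.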
#ifdef ARM_COMPUTE_AARCH64_V8_2
    if(ci.CPU == CPUTarget::A75_DOT)
    {
        // Configure matrix multiply kernel
        GemmInterleaved<gemm_s8_12x8, int8_t, int32_t> gemm(&ci, M, N, K, false, false);
        _workspace.allocator()->init(TensorInfo(TensorShape{ (gemm.get_working_size() + workspace_alignment - 1) * NEScheduler::get().num_threads() }, 1, DataType::U8));
        _memory_group.manage(&_workspace);

        // Configure matrix multiplication kernel
        auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpAArch64V8P4Kernel>();
        k->configure(a, b, output, &_workspace, 1.f, 1.f);
        _mm_kernel = std::move(k);
        _workspace.allocator()->allocate();
    }
    else if(ci.CPU == CPUTarget::A55_DOT)
    {
        ARM_COMPUTE_ERROR("WIP");
    }
    else
#elif defined(ARM_COMPUTE_AARCH64_V8A)
    if(ci.CPU == CPUTarget::A53)
    {
        switch(a->info()->data_type())
        {
            case DataType::S8:
            {
                // Configure matrix multiply kernel
                GemmInterleaved<gemm_s16_12x8, int8_t, int32_t> gemm(&ci, M, N, K, false, false);
                _workspace.allocator()->init(TensorInfo(TensorShape{ (gemm.get_working_size() + workspace_alignment - 1) * NEScheduler::get().num_threads() }, 1, DataType::U8));
            }
            break;
            case DataType::U8:
            {
                // Configure matrix multiply kernel
                GemmInterleaved<gemm_u16_12x8, uint8_t, uint32_t> gemm(&ci, M, N, K, false, false);
                _workspace.allocator()->init(TensorInfo(TensorShape{ (gemm.get_working_size() + workspace_alignment - 1) * NEScheduler::get().num_threads() }, 1, DataType::U8));
            }
            break;
            default:
                ARM_COMPUTE_ERROR("Datatype not supported");
        }

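        // The workspace provides per-thread scratch memory for the assembly GEMM's
        // intermediate blocks; it is sized above from the selected kernel's requirements.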
        _memory_group.manage(&_workspace);
        // Configure matrix multiplication kernel
        auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpAArch64A53Kernel>();
        k->configure(a, b, output, &_workspace, 1.f, 1.f);
        _mm_kernel = std::move(k);
        _workspace.allocator()->allocate();
    }
    else if(1) // Generic v8a kernel
    {
        switch(a->info()->data_type())
        {
            case DataType::S8:
            {
                // Configure matrix multiply kernel
                GemmInterleaved<gemm_s8_4x4, int8_t, int32_t> gemm(&ci, M, N, K, false, false);
                _workspace.allocator()->init(TensorInfo(TensorShape{ (gemm.get_working_size() + workspace_alignment - 1) * NEScheduler::get().num_threads() }, 1, DataType::U8));
            }
            break;
            case DataType::U8:
            {
                // Configure matrix multiply kernel
                GemmInterleaved<gemm_u8_4x4, uint8_t, uint32_t> gemm(&ci, M, N, K, false, false);
                _workspace.allocator()->init(TensorInfo(TensorShape{ (gemm.get_working_size() + workspace_alignment - 1) * NEScheduler::get().num_threads() }, 1, DataType::U8));
            }
            break;
            default:
                ARM_COMPUTE_ERROR("Datatype not supported");
        }
        _memory_group.manage(&_workspace);
        // Configure matrix multiplication kernel
        auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpAArch64Kernel>();
        k->configure(a, b, output, &_workspace, 1.f, 1.f);
        _mm_kernel = std::move(k);
        _workspace.allocator()->allocate();
    }
    else
#endif /* ARM_COMPUTE_AARCH64_V8_2 */
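    // Fallback path: no assembly kernel is available, so interleave A, transpose B and
    // run the generic NEON low-precision matrix multiply kernel.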
    {
        // The interleaved output matrix will have the following shape: [ a_width * 4, ceil(a_height / 4.0f) ]
        TensorShape shape_tmp_a = a->info()->tensor_shape();
        shape_tmp_a.set(0, a->info()->dimension(0) * 4);
        shape_tmp_a.set(1, std::ceil(a->info()->dimension(1) / 4.f));

        // The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width / 16.0f) ]
        TensorShape shape_tmp_b = b->info()->tensor_shape();
        shape_tmp_b.set(0, b->info()->dimension(1) * 16);
        shape_tmp_b.set(1, std::ceil(b->info()->dimension(0) / 16.f));

        TensorInfo info_a(shape_tmp_a, 1, a->info()->data_type());
        TensorInfo info_b(shape_tmp_b, 1, b->info()->data_type());
        _tmp_a.allocator()->init(info_a);
        _tmp_b.allocator()->init(info_b);
        _memory_group.manage(&_tmp_a);
        _memory_group.manage(&_tmp_b);

        // Configure interleave kernel
        {
            auto k = arm_compute::support::cpp14::make_unique<NEGEMMInterleave4x4Kernel>();
            k->configure(a, &_tmp_a);
            _mtx_a_reshape_kernel = std::move(k);
        }

        // Configure transpose kernel
        {
            auto k = arm_compute::support::cpp14::make_unique<NEGEMMTranspose1xWKernel>();
            k->configure(b, &_tmp_b);
            _mtx_b_reshape_kernel = std::move(k);
        }

        // Configure matrix multiply kernel
        {
            auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpMatrixMultiplyKernel>();
            k->configure(&_tmp_a, &_tmp_b, output);
            _mm_kernel = std::move(k);
        }

        // Allocate tensors
        _tmp_a.allocator()->allocate();
        _tmp_b.allocator()->allocate();
    }
}

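// Runs the reshape kernels when the generic path was configured, then the matrix multiply kernel.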
void NEGEMMLowpAssemblyMatrixMultiplyCore::run()
{
    _memory_group.acquire();
    if(_mtx_a_reshape_kernel)
    {
        NEScheduler::get().schedule(_mtx_a_reshape_kernel.get(), Window::DimY);
    }

    if(_mtx_b_reshape_kernel)
    {
        NEScheduler::get().schedule(_mtx_b_reshape_kernel.get(), Window::DimY);
    }

    NEScheduler::get().schedule(_mm_kernel.get(), Window::DimY);

    _memory_group.release();
}