/*
 * Copyright (c) 2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_ASSEMBLY_HELPER_H__
#define __ARM_ASSEMBLY_HELPER_H__

#include "arm_compute/core/ITensor.h"
#include "support/ToolchainSupport.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/IAccessWindow.h"
#include "arm_compute/core/Log.h"
#include "arm_compute/core/NEON/kernels/assembly/NEGEMMAssemblyWrapper.h"
#include "arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "arm_compute/runtime/Tensor.h"

namespace arm_compute
{
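/** Assembly kernel glue.
 *
 * Holds the arm_gemm GEMM object and the NEON kernel that wraps it, together with the
 * tensors it operates on, so that the optimised kernel can be scheduled once per batch.
 *
 * @tparam TypeInput  Data type of the input (LHS/RHS) tensors.
 * @tparam TypeOutput Data type of the output tensor.
 */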
template <typename TypeInput, typename TypeOutput>
class AssemblyKernelGlue final
{
public:
    /** Operator (input) and result (output) element types. */
    using TypeOperator = TypeInput;
    using TypeResult   = TypeOutput;
    /** Underlying arm_gemm GEMM type. */
    using AssemblyGemm = arm_gemm::GemmCommon<TypeInput, TypeOutput>;

    /** Default constructor. */
    AssemblyKernelGlue()
        : _gemm_kernel_asm(nullptr), _optimised_kernel(nullptr), _a(nullptr), _b(nullptr), _d(nullptr)
    {
    }
    /** Prevent instances of this class from being copied. */
    AssemblyKernelGlue(const AssemblyKernelGlue<TypeInput, TypeOutput> &) = delete;
    const AssemblyKernelGlue<TypeInput, TypeOutput> &operator=(const AssemblyKernelGlue<TypeInput, TypeOutput> &) = delete;

    /** Assembly GEMM object created by arm_gemm. */
    std::unique_ptr<AssemblyGemm> _gemm_kernel_asm;
    /** NEON kernel wrapping the assembly GEMM. */
    std::unique_ptr<INEKernel> _optimised_kernel;
    /** Input tensors (LHS and RHS) and output tensor. */
    const ITensor *_a;
    const ITensor *_b;
    ITensor       *_d;

    /** Configures the array pointers and strides in the assembly kernel and executes it.
     *
     * set_arrays() has to be called for every batch because the inputs may have more than two
     * dimensions, in which case the kernel is scheduled once per batch with updated pointers.
     */
    inline void run()
    {
        // Leading dimensions, expressed in elements rather than bytes
        const int lda = _a->info()->strides_in_bytes().y() / sizeof(TypeInput);
        const int ldb = _b->info()->strides_in_bytes().y() / sizeof(TypeInput);
        const int ldd = _d->info()->strides_in_bytes().y() / sizeof(TypeOutput);

        // Configure kernel window
        Window     window  = calculate_max_window(*_d->info());
        const auto in1_ptr = reinterpret_cast<const TypeInput *>(_b->buffer());

        // Only iterate over batches
        Window win(window);
        win.set(0, Window::Dimension(0, 1, 1));
        win.set(1, Window::Dimension(0, 1, 1));
        Iterator in0(_a, window);
        Iterator out(_d, window);
        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto in0_ptr = reinterpret_cast<const TypeInput *>(in0.ptr());
            auto       out_ptr = reinterpret_cast<TypeOutput *>(out.ptr());
            _gemm_kernel_asm->set_arrays(in0_ptr, lda, in1_ptr, ldb, out_ptr, ldd);
            NEScheduler::get().schedule(_optimised_kernel.get(), Window::DimX);
        },
        in0, out);
    }
};

/** Commonly used specialisations of the assembly kernel glue. */
using AssemblyKernelGlueF32   = AssemblyKernelGlue<float, float>;
using AssemblyKernelGlueU8U32 = AssemblyKernelGlue<uint8_t, uint32_t>;
using AssemblyKernelGlueS8S32 = AssemblyKernelGlue<int8_t, int32_t>;

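/** Allocate a workspace tensor for the assembly GEMM.
 *
 * @param[in]  workspace_size Size requested by the assembly GEMM, in bytes.
 * @param[out] workspace      Tensor backing the workspace; it is initialised and allocated here.
 * @param[in]  memory_group   Memory group the workspace belongs to (currently not used by this helper).
 * @param[in]  alignment      Requested alignment in bytes; each per-thread slice is padded by alignment - 1 bytes.
 * @param[in]  num_threads    Number of threads that will share the workspace.
 */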
inline void allocate_workspace(size_t workspace_size, Tensor &workspace, MemoryGroup &memory_group, size_t alignment, unsigned int num_threads)
{
    ARM_COMPUTE_ERROR_ON_MSG(workspace_size == 0, "Workspace size cannot be 0");
    // One over-allocated slice per thread, padded by (alignment - 1) bytes so each slice can be aligned up
    workspace.allocator()->init(TensorInfo(TensorShape{ (workspace_size + alignment - 1) * num_threads }, 1, DataType::S8));
    workspace.allocator()->allocate();
}

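/** Create an NEGEMMAssemblyWrapper kernel if the GEMM can be run with the assembly path.
 *
 * Only FP32 GEMMs (and FP16 when the library is built with FP16 support) without a bias
 * addition (c == nullptr or beta == 0) are currently handled by the assembly kernels.
 *
 * @return The wrapper kernel, or nullptr if the assembly path cannot be used.
 */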
template <typename T>
std::unique_ptr<NEGEMMAssemblyWrapper<T>> create_wrapper_kernel(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta)
{
    // TODO: Rework this function; the data type and CPU checks could move into a separate can_run_optimised_kernel() helper.
#if defined(__arm__)
    if(NEScheduler::get().cpu_info().CPU == CPUTarget::ARMV7 && a->info()->data_type() == DataType::F32 && (c == nullptr || beta == 0.f))
    {
        return support::cpp14::make_unique<NEGEMMAssemblyWrapper<T>>();
    }
#elif defined(__aarch64__)
    if(NEScheduler::get().cpu_info().CPU >= CPUTarget::ARMV8 && a->info()->data_type() == DataType::F32 && (c == nullptr || beta == 0.f))
    {
        return support::cpp14::make_unique<NEGEMMAssemblyWrapper<T>>();
    }
    else if(a->info()->data_type() == DataType::F16 && (c == nullptr || beta == 0.f))
    {
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        return support::cpp14::make_unique<NEGEMMAssemblyWrapper<T>>();
#else  /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
        ARM_COMPUTE_ERROR("Recompile the library with arch=arm64-v8.2-a to enable support for FP16.");
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
    }
#endif /* defined(__arm__) || defined(__aarch64__) */
    return nullptr;
}

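/** Try to set up the assembly GEMM path for the given tensors.
 *
 * Builds the arm_gemm GEMM object and its wrapper kernel, allocates the workspace if one is
 * required, and stores everything in @p asm_glue so that it can be executed later via run().
 *
 * @return True if the assembly path was configured, false if the caller has to fall back to the generic path.
 */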
template <typename T>
inline bool setup_assembly_kernel(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta,
                                  Tensor &workspace, MemoryGroup &memory_group, T &asm_glue)
{
    const ::CPUInfo *ci          = get_CPUInfo();
    const int        M           = d->info()->tensor_shape().y();
    const int        N           = d->info()->tensor_shape().x();
    const int        K           = a->info()->tensor_shape().x();
    unsigned int     num_threads = NEScheduler::get().num_threads();
    // unique_ptr to a Gemm object
    std::unique_ptr<typename T::AssemblyGemm> asm_gemm(arm_gemm::gemm<typename T::TypeOperator, typename T::TypeResult>(*ci, M, N, K, false, false, alpha, beta, num_threads, false));

    // arm_compute wrapper for the Gemm object (see above)
    std::unique_ptr<NEGEMMAssemblyWrapper<typename T::AssemblyGemm>> acl_gemm_wrapper = create_wrapper_kernel<typename T::AssemblyGemm>(a, b, c, d, alpha, beta);
    if(acl_gemm_wrapper != nullptr && asm_gemm != nullptr)
    {
        acl_gemm_wrapper->configure(asm_gemm.get());
        const size_t workspace_size = asm_gemm->get_working_size();
        if(workspace_size)
        {
            // Allocate workspace
            allocate_workspace(workspace_size, workspace, memory_group, 4096, num_threads);
            asm_gemm->set_working_space(reinterpret_cast<typename T::TypeResult *>(workspace.buffer()));
        }
        // Limit the number of threads to the window size of the assembly kernel
        const unsigned int window_size = asm_gemm->get_window_size();
        if(window_size < num_threads)
        {
            num_threads = window_size;
            asm_gemm->set_nthreads(num_threads);
        }
        asm_glue._gemm_kernel_asm  = std::move(asm_gemm);
        asm_glue._optimised_kernel = std::move(acl_gemm_wrapper);
        // The array pointers are set up in the run() method
        asm_glue._a = a;
        asm_glue._b = b;
        asm_glue._d = d;
        return true;
    }
    return false;
}
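
/* A minimal usage sketch, assuming ITensor pointers `a`, `b` and `d` with matching GEMM shapes
 * and an existing `MemoryGroup memory_group`; every name other than the helpers defined above
 * is hypothetical:
 *
 *   Tensor                workspace{};
 *   AssemblyKernelGlueF32 asm_glue{};
 *   const bool            is_optimised = setup_assembly_kernel(a, b, nullptr, d, 1.f, 0.f,
 *                                                              workspace, memory_group, asm_glue);
 *   if(is_optimised)
 *   {
 *       asm_glue.run(); // Schedules the assembly kernel once per batch
 *   }
 *   // Otherwise fall back to the generic NEON GEMM path.
 */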
} // namespace arm_compute
#endif /* __ARM_ASSEMBLY_HELPER_H__ */