/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"

#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLScheduler.h"

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;

CLGEMMLowpMatrixMultiplyCore::CLGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)), _mm_kernel(), _mtx_a_reshape_kernel(), _mtx_b_reshape_kernel(), _mtx_a_reduction_kernel(), _mtx_b_reduction_kernel(), _offset_contribution_kernel(),
      _vector_sum_col(), _vector_sum_row(), _tmp_a(), _tmp_b(), _a_offset(0), _b_offset(0), _is_interleaved_transposed(true), _is_first_run(true), _reshape_b_only_on_first_run(false)
{
}

void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor *b, ICLTensor *output, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, output);
    ARM_COMPUTE_UNUSED(gemm_info);
    ARM_COMPUTE_ERROR_THROW_ON(CLGEMMLowpMatrixMultiplyCore::validate(a->info(), b->info(), output->info(), gemm_info));

    _reshape_b_only_on_first_run = gemm_info.reshape_b_only_on_first_run();
    _a_offset = a->info()->quantization_info().offset;
    _b_offset = b->info()->quantization_info().offset;

    // Use the reshaped (interleaved/transposed) path only if matrix A has more than 16 rows and the target is not Bifrost;
    // otherwise run GEMMLowp directly on the unreshaped input tensors
    _is_interleaved_transposed = (a->info()->dimension(1)) > 16 && (CLScheduler::get().target() != GPUTarget::BIFROST);

    // Set the target for the matrix multiply kernel
    _mm_kernel.set_target(CLScheduler::get().target());

    const ICLTensor *matrix_a = a;
    const ICLTensor *matrix_b = b;

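    // Note: the reshape path interleaves matrix A in 4x4 blocks and transposes matrix B in 1xW blocks so that
    // the matrix multiply kernel can read both operands with contiguous accesses; the exact layouts are defined
    // by the interleave and transpose kernels (see CLGEMMInterleave4x4Kernel and CLGEMMTranspose1xWKernel).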
    if(_is_interleaved_transposed)
    {
        matrix_a = &_tmp_a;
        matrix_b = &_tmp_b;

        TensorInfo info_a(compute_interleaved_shape(*a->info()), 1, a->info()->data_type());
        TensorInfo info_b(compute_transpose1xW_shape(*b->info()), 1, b->info()->data_type());
        _tmp_a.allocator()->init(info_a);
        _tmp_b.allocator()->init(info_b);
        _memory_group.manage(&_tmp_a);
        _memory_group.manage(&_tmp_b);

        // Configure interleave kernel
        _mtx_a_reshape_kernel.configure(a, &_tmp_a);

        // Configure transpose kernel
        _mtx_b_reshape_kernel.configure(b, &_tmp_b);
    }

    // Configure matrix multiply kernel
    _mm_kernel.configure(matrix_a, matrix_b, output, _is_interleaved_transposed);

    // Initialize matrix B reduction kernel only if _a_offset is not equal to 0
    if(_a_offset != 0)
    {
        TensorInfo info_vector_sum_col(compute_reductionA_shape(*b->info()), 1, DataType::S32);
        _vector_sum_col.allocator()->init(info_vector_sum_col);
        _memory_group.manage(&_vector_sum_col);

        // Configure matrix B reduction kernel
        _mtx_b_reduction_kernel.configure(b, &_vector_sum_col);
    }

    // Initialize matrix A reduction kernel only if _b_offset is not equal to 0
    if(_b_offset != 0)
    {
        TensorInfo info_vector_sum_row(compute_reductionB_shape(*a->info()), 1, DataType::S32);
        _vector_sum_row.allocator()->init(info_vector_sum_row);
        _memory_group.manage(&_vector_sum_row);

        // Configure matrix A reduction kernel
        _mtx_a_reduction_kernel.configure(a, &_vector_sum_row);
    }

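    // Note: the offset contribution corrects the S32 accumulation of the quantized multiplication.
    // With real values r = scale * (q - offset), each output element of A * B expands to
    //   sum_k(a_q * b_q) - b_offset * sum_k(a_q) - a_offset * sum_k(b_q) + K * a_offset * b_offset
    // so the row sums of A are only needed when b_offset != 0, the column sums of B only when a_offset != 0,
    // and K (the number of columns of A, i.e. a->info()->dimension(0)) accounts for the constant term.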
    // Configure offset contribution kernel
    _offset_contribution_kernel.configure(output, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, a->info()->dimension(0), _a_offset, _b_offset);

    // Allocate tensors
    if(_is_interleaved_transposed)
    {
        _tmp_a.allocator()->allocate();
        _tmp_b.allocator()->allocate();
    }

    if(_a_offset != 0)
    {
        _vector_sum_col.allocator()->allocate();
    }

    if(_b_offset != 0)
    {
        _vector_sum_row.allocator()->allocate();
    }
}

Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *output, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((a)->dimension(0) != (b)->dimension(1),
                                    "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((a)->dimension(1) != (output)->dimension(1),
                                    "The output matrix must have the same number of rows as the matrix A");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((b)->dimension(0) != (output)->dimension(0),
                                    "The output matrix must have the same number of columns as the matrix B");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported");

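    // Note: the checks below mirror the configure() path: temporary TensorInfo objects stand in for the
    // intermediate tensors so that each kernel can be validated statically, without allocating any memory.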
    int32_t a_offset = a->quantization_info().offset;
    int32_t b_offset = b->quantization_info().offset;
    bool is_interleaved_transposed = (a->dimension(1)) > 16 && (CLScheduler::get().target() != GPUTarget::BIFROST);

    if(is_interleaved_transposed)
    {
        TensorInfo info_a(compute_interleaved_shape(*a), 1, a->data_type());
        TensorInfo info_b(compute_transpose1xW_shape(*b), 1, b->data_type());

        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMInterleave4x4Kernel::validate(a, &info_a, 1));
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMTranspose1xWKernel::validate(b, &info_b, 1));
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyKernel::validate(&info_a, &info_b, output));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyKernel::validate(a, b, output));
    }

    TensorInfo info_vector_sum_col, info_vector_sum_row;

    // Validate matrix B reduction kernel only if a_offset is not equal to 0
    if(a_offset != 0)
    {
        info_vector_sum_col = TensorInfo(compute_reductionA_shape(*b), 1, DataType::S32);

        // Validate matrix B reduction kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixBReductionKernel::validate(b, &info_vector_sum_col));
    }

    // Validate matrix A reduction kernel only if b_offset is not equal to 0
    if(b_offset != 0)
    {
        info_vector_sum_row = TensorInfo(compute_reductionB_shape(*a), 1, DataType::S32);

        // Validate matrix A reduction kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixAReductionKernel::validate(a, &info_vector_sum_row));
    }

    // Validate offset contribution kernel
    ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOffsetContributionKernel::validate(output,
                                                                             a_offset == 0 ? nullptr : &info_vector_sum_col,
                                                                             b_offset == 0 ? nullptr : &info_vector_sum_row,
                                                                             a_offset, b_offset));

    return Status{};
}

void CLGEMMLowpMatrixMultiplyCore::run()
{
    _memory_group.acquire();

    if(_is_interleaved_transposed)
    {
        // Run reshape matrix A
        CLScheduler::get().enqueue(_mtx_a_reshape_kernel, false);

        if(_is_first_run || !_reshape_b_only_on_first_run)
        {
            // Run reshape matrix B
            CLScheduler::get().enqueue(_mtx_b_reshape_kernel, false);
        }
    }

    // Note: if _reshape_b_only_on_first_run is true, matrix B does not change between runs, so its reduction only needs to be executed on the first run
    if(_is_first_run || !_reshape_b_only_on_first_run)
    {
        // Run matrix B reduction kernel only if _a_offset is not equal to 0
        if(_a_offset != 0)
        {
            CLScheduler::get().enqueue(_mtx_b_reduction_kernel, false);
        }
    }

    // Run matrix multiply
    CLScheduler::get().enqueue(_mm_kernel, false);

    // Run matrix A reduction kernel only if _b_offset is not equal to 0
    if(_b_offset != 0)
    {
        CLScheduler::get().enqueue(_mtx_a_reduction_kernel, false);
    }

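    // Note: the kernels above are enqueued with flush = false; the final enqueue below passes flush = true so
    // that the CL command queue is flushed once, after the whole batch of kernels has been enqueued.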
    // Run offset contribution kernel
    CLScheduler::get().enqueue(_offset_contribution_kernel, true);

    _memory_group.release();

    _is_first_run = false;
}
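
// Minimal usage sketch (illustrative only, not part of this translation unit). Tensor names and shapes are
// hypothetical; it assumes QASYMM8 inputs and an S32 output, as enforced by validate() above:
//
//   CLTensor a, b, output;
//   a.allocator()->init(TensorInfo(TensorShape(K, M), 1, DataType::QASYMM8, QuantizationInfo(0.5f, 10)));
//   b.allocator()->init(TensorInfo(TensorShape(N, K), 1, DataType::QASYMM8, QuantizationInfo(0.5f, 5)));
//   output.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::S32));
//
//   CLGEMMLowpMatrixMultiplyCore gemmlowp;
//   gemmlowp.configure(&a, &b, &output);
//
//   a.allocator()->allocate();
//   b.allocator()->allocate();
//   output.allocator()->allocate();
//   // ... fill a and b with quantized data ...
//   gemmlowp.run();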