blob: b2545228c4e40382f1cba07eccd4f51f8ff6eb57 [file] [log] [blame]
Gian Marco05288a22017-11-21 10:57:50 +00001/*
giuros011c9efeb2019-01-11 14:04:43 +00002 * Copyright (c) 2017-2019 ARM Limited.
Gian Marco05288a22017-11-21 10:57:50 +00003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
25
26#include "arm_compute/core/CL/ICLTensor.h"
27#include "arm_compute/core/Error.h"
28#include "arm_compute/core/Helpers.h"
29#include "arm_compute/core/TensorInfo.h"
30#include "arm_compute/core/Types.h"
31#include "arm_compute/core/Validate.h"
Georgios Pinitas358ca202017-12-07 16:47:52 +000032#include "arm_compute/core/utils/misc/ShapeCalculator.h"
Gian Marco05288a22017-11-21 10:57:50 +000033#include "arm_compute/runtime/CL/CLScheduler.h"
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +000034#include "arm_compute/runtime/CL/gemm_reshaped/CLGEMMReshapedConfiguration.h"
Gian Marco05288a22017-11-21 10:57:50 +000035
giuros011c9efeb2019-01-11 14:04:43 +000036namespace arm_compute
37{
Georgios Pinitas358ca202017-12-07 16:47:52 +000038using namespace arm_compute::misc::shape_calculator;
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +000039using namespace arm_compute::cl_gemm;
Gian Marco05288a22017-11-21 10:57:50 +000040
Gian Marco19835e52018-01-30 13:35:54 +000041namespace
42{
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +000043inline bool is_gemm_reshaped(unsigned int m, bool reshape_b_only_on_first_run, GPUTarget gpu_target)
Gian Marco19835e52018-01-30 13:35:54 +000044{
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +000045 return (get_arch_from_target(gpu_target) != GPUTarget::MIDGARD) && (m > 1) && (reshape_b_only_on_first_run);
Gian Marco19835e52018-01-30 13:35:54 +000046}
47} // namespace
48
Gian Marco05288a22017-11-21 10:57:50 +000049CLGEMMLowpMatrixMultiplyCore::CLGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager)
Georgios Pinitas72219332018-06-05 14:56:06 +010050 : _memory_group(std::move(memory_manager)),
51 _mm_kernel(),
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +000052 _mm_reshaped_kernel(),
Georgios Pinitas72219332018-06-05 14:56:06 +010053 _mtx_a_reshape_kernel(),
54 _mtx_b_reshape_kernel(),
55 _mtx_a_reduction_kernel(),
56 _mtx_b_reduction_kernel(),
57 _offset_contribution_kernel(),
Gian Marco Iodice4b908652018-10-18 10:21:02 +010058 _offset_contribution_output_stage_kernel(),
Georgios Pinitas72219332018-06-05 14:56:06 +010059 _vector_sum_col(),
60 _vector_sum_row(),
61 _tmp_a(),
62 _tmp_b(),
Gian Marco Iodice4b908652018-10-18 10:21:02 +010063 _mm_result_s32(),
Georgios Pinitas72219332018-06-05 14:56:06 +010064 _original_b(nullptr),
65 _a_offset(0),
66 _b_offset(0),
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +000067 _is_gemm_reshaped(true),
Georgios Pinitas72219332018-06-05 14:56:06 +010068 _reshape_b_only_on_first_run(false),
Gian Marco Iodice4b908652018-10-18 10:21:02 +010069 _is_prepared(false),
70 _fuse_output_stage(false)
Gian Marco05288a22017-11-21 10:57:50 +000071{
72}
73
// Build the GEMMLowp pipeline for the given tensors:
//  - optionally reshape A (LHS) and B (RHS) when the reshaped path is selected,
//  - set up A-row/B-column reduction kernels needed for non-zero quantization offsets,
//  - configure the matrix-multiply kernel and the offset-contribution kernel
//    (fused with the output stage when gemm_info requests one).
// Note: the ordering of _memory_group.manage(...) and allocator()->allocate()
// calls below defines the temporaries' lifetimes and must not be reordered.
void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *c, ICLTensor *output, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLGEMMLowpMatrixMultiplyCore::validate(a->info(), b->info(), c != nullptr ? c->info() : nullptr, output->info(), gemm_info));

    _is_prepared                 = false;
    _original_b                  = b; // kept so prepare() can reshape B once and mark it unused
    _reshape_b_only_on_first_run = gemm_info.reshape_b_only_on_first_run();
    _a_offset                    = a->info()->quantization_info().offset;
    _b_offset                    = b->info()->quantization_info().offset;

    // Get the GPU target
    const GPUTarget gpu_target = CLScheduler::get().target();

    // Set the target for the kernels
    _mtx_a_reshape_kernel.set_target(gpu_target);
    _mm_kernel.set_target(gpu_target);

    const ICLTensor *matrix_a = a;
    const ICLTensor *matrix_b = b;
    GEMMRHSMatrixInfo rhs_info;
    GEMMLHSMatrixInfo lhs_info;

    // Arguments used by GEMMReshapeInfo
    // If we pass the matrix A and matrix B reshaped to CLGEMMMatrixMultiplyKernel, we need to pass m, n, k, mult_transpose1xW_width and mult_interleave4x4_height to CLGEMMReshapeInfo
    // in order to know how the matrices have been reshaped
    bool               reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d();
    const unsigned int m                       = reinterpret_input_as_3d ? (a->info()->dimension(1) * a->info()->dimension(2)) : a->info()->dimension(1);
    const unsigned int n                       = b->info()->dimension(0);
    const unsigned int k                       = a->info()->dimension(0);
    const unsigned int batch_size              = reinterpret_input_as_3d ? a->info()->dimension(3) : a->info()->dimension(2);
    const int          depth_output_gemm3d     = gemm_info.depth_output_gemm3d();

    // Check if we need to reshape the matrix A and matrix B
    _is_gemm_reshaped = is_gemm_reshaped(m, _reshape_b_only_on_first_run, gpu_target);

    if(_is_gemm_reshaped)
    {
        // When the GEMM is reshaped, force reinterpret_input_as_3d to false for the
        // downstream kernels: the output of the LHS reshape kernel is 2D.
        reinterpret_input_as_3d = false;

        matrix_a = &_tmp_a;
        matrix_b = &_tmp_b;

        _memory_group.manage(&_tmp_a);
        // When B is reshaped only on the first run, _tmp_b is allocated by prepare()
        // and must outlive the memory group, so it is not managed here.
        if(!_reshape_b_only_on_first_run)
        {
            _memory_group.manage(&_tmp_b);
        }

        // Pick up the GEMM configuration
        std::tie(lhs_info, rhs_info) = CLGEMMReshapedConfigurationFactory::create()->configure(m, n, k, batch_size, DataType::QASYMM8);

        // Configure interleave kernel
        // NOTE(review): the reshape kernel receives the original 3D flag from gemm_info
        // (not the forced-false local), presumably because it consumes the raw 3D input.
        _mtx_a_reshape_kernel.configure(a, &_tmp_a, lhs_info, gemm_info.reinterpret_input_as_3d());

        // Configure transpose kernel
        _mtx_b_reshape_kernel.configure(b, &_tmp_b, rhs_info);
    }

    // Initialize matrix B reduction kernel only if _a_offset is not equal to 0
    if(_a_offset != 0)
    {
        TensorInfo info_vector_sum_col(compute_reductionA_shape(*b->info()), 1, DataType::S32);
        _vector_sum_col.allocator()->init(info_vector_sum_col);
        if(!_reshape_b_only_on_first_run)
        {
            _memory_group.manage(&_vector_sum_col);
        }

        // Configure Matrix B reduction kernel
        _mtx_b_reduction_kernel.configure(b, &_vector_sum_col);
    }

    // Initialize Matrix A reduction kernel only if _b_offset is not equal to 0
    if(_b_offset != 0)
    {
        TensorInfo info_vector_sum_row(compute_reductionB_shape(*a->info()), 1, DataType::S32);
        _vector_sum_row.allocator()->init(info_vector_sum_row);
        _memory_group.manage(&_vector_sum_row);

        // Configure matrix A reduction kernel
        _mtx_a_reduction_kernel.configure(a, &_vector_sum_row);
    }

    // If GEMMLowpOutputStage != NONE, fuse the offset contribution with the output stage
    if(gemm_info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE)
    {
        _fuse_output_stage = true;

        // The fused path multiplies into an intermediate S32 buffer first.
        _memory_group.manage(&_mm_result_s32);

        if(_is_gemm_reshaped)
        {
            // Configure and tune matrix multiply kernel
            _mm_reshaped_kernel.configure(matrix_a, matrix_b, &_mm_result_s32, lhs_info, rhs_info, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
        }
        else
        {
            // Configure matrix multiply kernel
            _mm_kernel.configure(matrix_a, matrix_b, &_mm_result_s32, false, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
        }

        // Configure offset contribution kernel
        _offset_contribution_output_stage_kernel.configure(&_mm_result_s32, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, c, output, a->info()->dimension(0),
                                                           _a_offset, _b_offset, gemm_info.gemmlowp_output_stage());

        _mm_result_s32.allocator()->allocate();
    }
    else
    {
        if(_is_gemm_reshaped)
        {
            // Configure and tune matrix multiply kernel
            _mm_reshaped_kernel.configure(matrix_a, matrix_b, output, lhs_info, rhs_info, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
        }
        else
        {
            // Configure matrix multiply kernel
            _mm_kernel.configure(matrix_a, matrix_b, output, false, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
        }

        // Configure offset contribution kernel (applied in-place on the output)
        _offset_contribution_kernel.configure(output, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, c, a->info()->dimension(0), _a_offset, _b_offset);
    }

    // Allocate tensors
    if(_is_gemm_reshaped)
    {
        _tmp_a.allocator()->allocate();
        // _tmp_b is allocated lazily in prepare() when B is reshaped only once
        if(!_reshape_b_only_on_first_run)
        {
            _tmp_b.allocator()->allocate();
        }
    }

    // _vector_sum_col is allocated lazily in prepare() when B is reshaped only once
    if(_a_offset != 0 && !_reshape_b_only_on_first_run)
    {
        _vector_sum_col.allocator()->allocate();
    }

    if(_b_offset != 0)
    {
        _vector_sum_row.allocator()->allocate();
    }
}
220
// Static validation mirroring configure(): checks data types and GEMMInfo flags,
// then validates every kernel the runtime function would configure for these
// tensor infos (reshape, reductions, matrix multiply, offset contribution /
// fused output stage). Returns an error Status at the first failing check.
Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported");

    int32_t a_offset = a->quantization_info().offset;
    int32_t b_offset = b->quantization_info().offset;

    const ITensorInfo *matrix_a_info = a;
    const ITensorInfo *matrix_b_info = b;

    TensorInfo        tmp_a_info{};
    TensorInfo        tmp_b_info{};
    GEMMRHSMatrixInfo rhs_info;
    GEMMLHSMatrixInfo lhs_info;

    // Same m/n/k/batch derivation as configure()
    bool               reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d();
    const unsigned int m                       = reinterpret_input_as_3d ? (a->dimension(1) * a->dimension(2)) : a->dimension(1);
    const unsigned int n                       = b->dimension(0);
    const unsigned int k                       = a->dimension(0);
    const unsigned int batch_size              = reinterpret_input_as_3d ? a->dimension(3) : a->dimension(2);
    const int          depth_output_gemm3d     = gemm_info.depth_output_gemm3d();

    bool reshape_matrices = is_gemm_reshaped(m, gemm_info.reshape_b_only_on_first_run(), CLScheduler::get().target());

    // if reshape_matrices is set, force reinterpret_input_as_3d to be false as the output of the LHS reshape kernel will be 2D
    if(reshape_matrices)
    {
        reinterpret_input_as_3d = false;
    }

    const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d);

    if(reshape_matrices)
    {
        matrix_a_info = &tmp_a_info;
        matrix_b_info = &tmp_b_info;

        // Pick up the GEMM configuration
        std::tie(lhs_info, rhs_info) = CLGEMMReshapedConfigurationFactory::create()->configure(m, n, k, batch_size, DataType::QASYMM8);

        // Validate interleave kernel
        auto_init_if_empty(tmp_a_info, a->clone()->set_tensor_shape(compute_lhs_reshaped_shape(*a, lhs_info, gemm_info.reinterpret_input_as_3d())));
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMReshapeLHSMatrixKernel::validate(a, &tmp_a_info, lhs_info, gemm_info.reinterpret_input_as_3d()));

        // Validate transpose kernel

        auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(compute_rhs_reshaped_shape(*b, rhs_info)));
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMReshapeRHSMatrixKernel::validate(b, &tmp_b_info, rhs_info));
    }

    TensorInfo info_vector_sum_col, info_vector_sum_row;

    // Validate matrix B reduction kernel only if _a_offset is not equal to 0
    if(a_offset != 0)
    {
        info_vector_sum_col = TensorInfo(compute_reductionA_shape(*b), 1, DataType::S32);

        // Configure Matrix B reduction kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixBReductionKernel::validate(b, &info_vector_sum_col));
    }

    // Validate Matrix A reduction kernel only if _b_offset is not equal to 0
    if(b_offset != 0)
    {
        info_vector_sum_row = TensorInfo(compute_reductionB_shape(*a), 1, DataType::S32);

        // Configure matrix A reduction kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixAReductionKernel::validate(a, &info_vector_sum_row));
    }

    // Fused output stage path: multiply into an intermediate S32 tensor info
    if(gemm_info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE)
    {
        TensorInfo mm_result_s32_info{};

        if(reshape_matrices)
        {
            // Output tensor auto initialization if not yet initialized
            auto_init_if_empty(mm_result_s32_info, a->clone()->set_tensor_shape(compute_mm_shape(*matrix_a_info, *matrix_b_info, reshape_info)).set_data_type(DataType::S32));

            // Validate matrix multiply
            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyReshapedKernel::validate(matrix_a_info, matrix_b_info, &mm_result_s32_info, lhs_info, rhs_info, reshape_info));
        }
        else
        {
            // Output tensor auto initialization if not yet initialized
            auto_init_if_empty(mm_result_s32_info, a->clone()->set_tensor_shape(compute_mm_shape(*matrix_a_info, *matrix_b_info, false, reshape_info)).set_data_type(DataType::S32));

            // Validate matrix multiply
            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, &mm_result_s32_info, false, reshape_info));
        }
        // Validate offset contribution kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOffsetContributionOutputStageKernel::validate(&mm_result_s32_info,
                                                                                           a_offset == 0 ? nullptr : &info_vector_sum_col,
                                                                                           b_offset == 0 ? nullptr : &info_vector_sum_row,
                                                                                           c,
                                                                                           output,
                                                                                           a_offset, b_offset,
                                                                                           gemm_info.gemmlowp_output_stage()));
    }
    else
    {
        if(reshape_matrices)
        {
            // Validate matrix multiply
            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyReshapedKernel::validate(matrix_a_info, matrix_b_info, output, lhs_info, rhs_info, reshape_info));
        }
        else
        {
            // Validate matrix multiply
            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, output, false, reshape_info));
        }
        // Offset contribution can only be checked against an initialized output
        if(output->total_size() != 0)
        {
            // Validate offset contribution kernel
            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOffsetContributionKernel::validate(output,
                                                                                    a_offset == 0 ? nullptr : &info_vector_sum_col,
                                                                                    b_offset == 0 ? nullptr : &info_vector_sum_row,
                                                                                    c,
                                                                                    a_offset, b_offset));
        }
    }

    return Status{};
}
348
Gian Marco05288a22017-11-21 10:57:50 +0000349void CLGEMMLowpMatrixMultiplyCore::run()
350{
Georgios Pinitas72219332018-06-05 14:56:06 +0100351 prepare();
352
Gian Marco05288a22017-11-21 10:57:50 +0000353 _memory_group.acquire();
354
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +0000355 if(_is_gemm_reshaped)
Gian Marco05288a22017-11-21 10:57:50 +0000356 {
357 // Run reshape matrix A
358 CLScheduler::get().enqueue(_mtx_a_reshape_kernel, false);
359
Georgios Pinitas72219332018-06-05 14:56:06 +0100360 if(!_reshape_b_only_on_first_run)
Chunosov5124be52017-11-22 20:42:13 +0700361 {
362 // Run reshape matrix B
363 CLScheduler::get().enqueue(_mtx_b_reshape_kernel, false);
364 }
365 }
366
Georgios Pinitas72219332018-06-05 14:56:06 +0100367 // Run matrix B reduction kernel only if _a_offset is not equal to 0
368 if(_a_offset != 0 && !_reshape_b_only_on_first_run)
Chunosov5124be52017-11-22 20:42:13 +0700369 {
Georgios Pinitas72219332018-06-05 14:56:06 +0100370 CLScheduler::get().enqueue(_mtx_b_reduction_kernel, false);
Gian Marco05288a22017-11-21 10:57:50 +0000371 }
372
373 // Run matrix multiply
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +0000374 if(_is_gemm_reshaped)
375 {
376 CLScheduler::get().enqueue(_mm_reshaped_kernel, false);
377 }
378 else
379 {
380 CLScheduler::get().enqueue(_mm_kernel, false);
381 }
Gian Marco05288a22017-11-21 10:57:50 +0000382
383 // Run matrix A reduction kernel only if _b_offset is not equal to 0
384 if(_b_offset != 0)
385 {
386 CLScheduler::get().enqueue(_mtx_a_reduction_kernel, false);
387 }
388
Gian Marco Iodice4b908652018-10-18 10:21:02 +0100389 if(_fuse_output_stage)
390 {
391 // Run offset contribution/output stage kernel
392 CLScheduler::get().enqueue(_offset_contribution_output_stage_kernel, true);
393 }
394 else
395 {
396 // Run offset contribution kernel
397 CLScheduler::get().enqueue(_offset_contribution_kernel, true);
398 }
Gian Marco05288a22017-11-21 10:57:50 +0000399
400 _memory_group.release();
Georgios Pinitas72219332018-06-05 14:56:06 +0100401}
Chunosov5124be52017-11-22 20:42:13 +0700402
Georgios Pinitas72219332018-06-05 14:56:06 +0100403void CLGEMMLowpMatrixMultiplyCore::prepare()
404{
405 if(!_is_prepared)
406 {
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +0000407 if(_is_gemm_reshaped && _reshape_b_only_on_first_run)
Georgios Pinitas72219332018-06-05 14:56:06 +0100408 {
409 ARM_COMPUTE_ERROR_ON(!_original_b->is_used());
410
411 // Run reshape kernel and mark original weights tensor as unused
412 _tmp_b.allocator()->allocate();
413 CLScheduler::get().enqueue(_mtx_b_reshape_kernel, false);
414 _original_b->mark_as_unused();
415 }
416
417 // Run matrix B reduction kernel only if _a_offset is not equal to 0
418 if(_a_offset != 0 && _reshape_b_only_on_first_run)
419 {
420 _vector_sum_col.allocator()->allocate();
421 CLScheduler::get().enqueue(_mtx_b_reduction_kernel, false);
422 }
423
424 CLScheduler::get().queue().finish();
425 _is_prepared = true;
426 }
Gian Marco05288a22017-11-21 10:57:50 +0000427}
giuros011c9efeb2019-01-11 14:04:43 +0000428} // namespace arm_compute