/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"

#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/gemm/reshaped/CLGEMMReshapedKernelConfiguration.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLScheduler.h"

namespace arm_compute
{
using namespace arm_compute::misc::shape_calculator;
using namespace arm_compute::cl_gemm;

namespace
{
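// Heuristic for choosing the reshaped GEMM path: it pays off only on a real
// matrix-matrix workload (m > 1), when the reshaped B can be reused across runs
// (reshape_b_only_on_first_run), and on post-Midgard GPU architectures.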
inline bool is_gemm_reshaped(unsigned int m, bool reshape_b_only_on_first_run, GPUTarget gpu_target)
{
    return (get_arch_from_target(gpu_target) != GPUTarget::MIDGARD) && (m > 1) && (reshape_b_only_on_first_run);
}
} // namespace

CLGEMMLowpMatrixMultiplyCore::CLGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)),
      _mm_kernel(),
      _mm_reshaped_kernel(),
      _mtx_a_reshape_kernel(),
      _mtx_b_reshape_kernel(),
      _mtx_a_reduction_kernel(),
      _mtx_b_reduction_kernel(),
      _offset_contribution_kernel(),
      _offset_contribution_output_stage_kernel(),
      _vector_sum_col(),
      _vector_sum_row(),
      _tmp_a(),
      _tmp_b(),
      _mm_result_s32(),
      _original_b(nullptr),
      _a_offset(0),
      _b_offset(0),
      _is_gemm_reshaped(true),
      _reshape_b_only_on_first_run(false),
      _is_prepared(false),
      _fuse_output_stage(false)
{
}

void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *c, ICLTensor *output, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLGEMMLowpMatrixMultiplyCore::validate(a->info(), b->info(), c != nullptr ? c->info() : nullptr, output->info(), gemm_info));

    _is_prepared                 = false;
    _original_b                  = b;
    _reshape_b_only_on_first_run = gemm_info.reshape_b_only_on_first_run();
    _a_offset                    = a->info()->quantization_info().offset;
    _b_offset                    = b->info()->quantization_info().offset;

    // Get the GPU target
    const GPUTarget gpu_target = CLScheduler::get().target();

    // Set the target for the kernels
    _mtx_a_reshape_kernel.set_target(gpu_target);
    _mm_kernel.set_target(gpu_target);

    const ICLTensor  *matrix_a = a;
    const ICLTensor  *matrix_b = b;
    GEMMRHSMatrixInfo rhs_info;
    GEMMLHSMatrixInfo lhs_info;

    // Arguments used by GEMMReshapeInfo
    // If we pass matrix A and matrix B already reshaped to CLGEMMMatrixMultiplyKernel, we need to pass m, n, k, mult_transpose1xW_width and mult_interleave4x4_height to GEMMReshapeInfo
    // so that the kernel knows how the matrices have been reshaped
    bool               reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d();
    const unsigned int m                       = reinterpret_input_as_3d ? (a->info()->dimension(1) * a->info()->dimension(2)) : a->info()->dimension(1);
    const unsigned int n                       = b->info()->dimension(0);
    const unsigned int k                       = a->info()->dimension(0);
    const unsigned int batch_size              = reinterpret_input_as_3d ? a->info()->dimension(3) : a->info()->dimension(2);
    const int          depth_output_gemm3d     = gemm_info.depth_output_gemm3d();
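    // With reinterpret_input_as_3d, dimensions 1 and 2 of A are collapsed into M and
    // dimension 3 becomes the batch size, so a 3D input is consumed as a set of 2D
    // matrices without an explicit reshape.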

    // Check if we need to reshape matrix A and matrix B
    _is_gemm_reshaped = is_gemm_reshaped(m, _reshape_b_only_on_first_run, gpu_target);

    if(_is_gemm_reshaped)
    {
        // If _is_gemm_reshaped is set, force reinterpret_input_as_3d to be false as the output of CLGEMMReshapeLHSMatrixKernel will be 2D
        reinterpret_input_as_3d = false;

        matrix_a = &_tmp_a;
        matrix_b = &_tmp_b;

        _memory_group.manage(&_tmp_a);
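        // When B is reshaped only on the first run, _tmp_b must outlive a single run,
        // so it is allocated in prepare() instead of being managed by the memory group here.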
        if(!_reshape_b_only_on_first_run)
        {
            _memory_group.manage(&_tmp_b);
        }

        // Pick up the GEMM configuration
        std::tie(lhs_info, rhs_info) = CLGEMMReshapedKernelConfigurationFactory::create(gpu_target)->configure(m, n, k, batch_size, DataType::QASYMM8);

        // Configure reshape LHS kernel
        _mtx_a_reshape_kernel.configure(a, &_tmp_a, lhs_info, gemm_info.reinterpret_input_as_3d());

        // Configure reshape RHS kernel
        _mtx_b_reshape_kernel.configure(b, &_tmp_b, rhs_info);
    }

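    // A quantized GEMM accumulates in S32 as:
    //   c_ij = sum_k (a_ik - a_offset) * (b_kj - b_offset)
    //        = sum_k a_ik * b_kj - a_offset * sum_k b_kj - b_offset * sum_k a_ik + k * a_offset * b_offset
    // The reduction kernels below precompute the column sums of B and the row sums of A;
    // the offset contribution kernel later folds these terms into the raw A*B result.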
    // Initialize matrix B reduction kernel only if _a_offset is not equal to 0
    if(_a_offset != 0)
    {
        TensorInfo info_vector_sum_col(compute_reductionA_shape(*b->info()), 1, DataType::S32);
        _vector_sum_col.allocator()->init(info_vector_sum_col);
        if(!_reshape_b_only_on_first_run)
        {
            _memory_group.manage(&_vector_sum_col);
        }

        // Configure Matrix B reduction kernel
        _mtx_b_reduction_kernel.configure(b, &_vector_sum_col);
    }

    // Initialize Matrix A reduction kernel only if _b_offset is not equal to 0
    if(_b_offset != 0)
    {
        TensorInfo info_vector_sum_row(compute_reductionB_shape(*a->info()), 1, DataType::S32);
        _vector_sum_row.allocator()->init(info_vector_sum_row);
        _memory_group.manage(&_vector_sum_row);

        // Configure matrix A reduction kernel
        _mtx_a_reduction_kernel.configure(a, &_vector_sum_row);
    }

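    // Fusing the offset contribution with the output stage saves a pass over the S32
    // accumulators: a single kernel consumes _mm_result_s32, applies the offset terms
    // and quantizes the result back down in one go.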
    // If GEMMLowpOutputStage != NONE, fuse the offset contribution with the output stage
    if(gemm_info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE)
    {
        _fuse_output_stage = true;

        _memory_group.manage(&_mm_result_s32);

        if(_is_gemm_reshaped)
        {
            // Configure and tune matrix multiply kernel
            _mm_reshaped_kernel.configure(matrix_a, matrix_b, &_mm_result_s32, lhs_info, rhs_info, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
        }
        else
        {
            // Configure matrix multiply kernel
            _mm_kernel.configure(matrix_a, matrix_b, &_mm_result_s32, false, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
        }

        // Configure offset contribution kernel
        _offset_contribution_output_stage_kernel.configure(&_mm_result_s32, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, c, output, a->info()->dimension(0),
                                                           _a_offset, _b_offset, gemm_info.gemmlowp_output_stage());

        _mm_result_s32.allocator()->allocate();
    }
    else
    {
        if(_is_gemm_reshaped)
        {
            // Configure and tune matrix multiply kernel
            _mm_reshaped_kernel.configure(matrix_a, matrix_b, output, lhs_info, rhs_info, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
        }
        else
        {
            // Configure matrix multiply kernel
            _mm_kernel.configure(matrix_a, matrix_b, output, false, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
        }

        // Configure offset contribution kernel
        _offset_contribution_kernel.configure(output, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, c, a->info()->dimension(0), _a_offset, _b_offset);
    }

    // Allocate tensors
    if(_is_gemm_reshaped)
    {
        _tmp_a.allocator()->allocate();
        if(!_reshape_b_only_on_first_run)
        {
            _tmp_b.allocator()->allocate();
        }
    }

    if(_a_offset != 0 && !_reshape_b_only_on_first_run)
    {
        _vector_sum_col.allocator()->allocate();
    }

    if(_b_offset != 0)
    {
        _vector_sum_row.allocator()->allocate();
    }
}

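// validate() mirrors configure() but works purely on ITensorInfo descriptors, so a
// configuration can be checked without creating OpenCL kernels or allocating memory.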
Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported");

    int32_t a_offset = a->quantization_info().offset;
    int32_t b_offset = b->quantization_info().offset;

    const ITensorInfo *matrix_a_info = a;
    const ITensorInfo *matrix_b_info = b;

    TensorInfo        tmp_a_info{};
    TensorInfo        tmp_b_info{};
    GEMMRHSMatrixInfo rhs_info;
    GEMMLHSMatrixInfo lhs_info;

    // Get the GPU target
    const GPUTarget gpu_target = CLScheduler::get().target();

    bool               reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d();
    const unsigned int m                       = reinterpret_input_as_3d ? (a->dimension(1) * a->dimension(2)) : a->dimension(1);
    const unsigned int n                       = b->dimension(0);
    const unsigned int k                       = a->dimension(0);
    const unsigned int batch_size              = reinterpret_input_as_3d ? a->dimension(3) : a->dimension(2);
    const int          depth_output_gemm3d     = gemm_info.depth_output_gemm3d();

    bool reshape_matrices = is_gemm_reshaped(m, gemm_info.reshape_b_only_on_first_run(), gpu_target);

    // If reshape_matrices is set, force reinterpret_input_as_3d to be false as the output of CLGEMMReshapeLHSMatrixKernel will be 2D
    if(reshape_matrices)
    {
        reinterpret_input_as_3d = false;
    }

    const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d);

    if(reshape_matrices)
    {
        matrix_a_info = &tmp_a_info;
        matrix_b_info = &tmp_b_info;

        // Pick up the GEMM configuration
        std::tie(lhs_info, rhs_info) = CLGEMMReshapedKernelConfigurationFactory::create(gpu_target)->configure(m, n, k, batch_size, DataType::QASYMM8);

        // Validate reshape LHS kernel
        auto_init_if_empty(tmp_a_info, a->clone()->set_tensor_shape(compute_lhs_reshaped_shape(*a, lhs_info, gemm_info.reinterpret_input_as_3d())));
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMReshapeLHSMatrixKernel::validate(a, &tmp_a_info, lhs_info, gemm_info.reinterpret_input_as_3d()));

        // Validate reshape RHS kernel
        auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(compute_rhs_reshaped_shape(*b, rhs_info)));
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMReshapeRHSMatrixKernel::validate(b, &tmp_b_info, rhs_info));
    }

    TensorInfo info_vector_sum_col, info_vector_sum_row;

    // Validate matrix B reduction kernel only if a_offset is not equal to 0
    if(a_offset != 0)
    {
        info_vector_sum_col = TensorInfo(compute_reductionA_shape(*b), 1, DataType::S32);

        // Configure Matrix B reduction kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixBReductionKernel::validate(b, &info_vector_sum_col));
    }

    // Validate Matrix A reduction kernel only if b_offset is not equal to 0
    if(b_offset != 0)
    {
        info_vector_sum_row = TensorInfo(compute_reductionB_shape(*a), 1, DataType::S32);

        // Configure matrix A reduction kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixAReductionKernel::validate(a, &info_vector_sum_row));
    }

    if(gemm_info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE)
    {
        TensorInfo mm_result_s32_info{};

        if(reshape_matrices)
        {
            // Output tensor auto-initialization if not yet initialized
            auto_init_if_empty(mm_result_s32_info, a->clone()->set_tensor_shape(compute_mm_shape(*matrix_a_info, *matrix_b_info, reshape_info)).set_data_type(DataType::S32));

            // Validate matrix multiply
            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyReshapedKernel::validate(matrix_a_info, matrix_b_info, &mm_result_s32_info, lhs_info, rhs_info, reshape_info));
        }
        else
        {
            // Output tensor auto-initialization if not yet initialized
            auto_init_if_empty(mm_result_s32_info, a->clone()->set_tensor_shape(compute_mm_shape(*matrix_a_info, *matrix_b_info, false, reshape_info)).set_data_type(DataType::S32));

            // Validate matrix multiply
            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, &mm_result_s32_info, false, reshape_info));
        }
        // Validate offset contribution kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOffsetContributionOutputStageKernel::validate(&mm_result_s32_info,
                                                                                            a_offset == 0 ? nullptr : &info_vector_sum_col,
                                                                                            b_offset == 0 ? nullptr : &info_vector_sum_row,
                                                                                            c,
                                                                                            output,
                                                                                            a_offset, b_offset,
                                                                                            gemm_info.gemmlowp_output_stage()));
    }
    else
    {
        if(reshape_matrices)
        {
            // Validate matrix multiply
            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyReshapedKernel::validate(matrix_a_info, matrix_b_info, output, lhs_info, rhs_info, reshape_info));
        }
        else
        {
            // Validate matrix multiply
            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, output, false, reshape_info));
        }
        if(output->total_size() != 0)
        {
            // Validate offset contribution kernel
            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOffsetContributionKernel::validate(output,
                                                                                     a_offset == 0 ? nullptr : &info_vector_sum_col,
                                                                                     b_offset == 0 ? nullptr : &info_vector_sum_row,
                                                                                     c,
                                                                                     a_offset, b_offset));
        }
    }

    return Status{};
}

void CLGEMMLowpMatrixMultiplyCore::run()
{
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);

    if(_is_gemm_reshaped)
    {
        // Run reshape matrix A
        CLScheduler::get().enqueue(_mtx_a_reshape_kernel, false);

        if(!_reshape_b_only_on_first_run)
        {
            // Run reshape matrix B
            CLScheduler::get().enqueue(_mtx_b_reshape_kernel, false);
        }
    }

    // Run matrix B reduction kernel only if _a_offset is not equal to 0
    if(_a_offset != 0 && !_reshape_b_only_on_first_run)
    {
        CLScheduler::get().enqueue(_mtx_b_reduction_kernel, false);
    }

    // Run matrix multiply
    if(_is_gemm_reshaped)
    {
        CLScheduler::get().enqueue(_mm_reshaped_kernel, false);
    }
    else
    {
        CLScheduler::get().enqueue(_mm_kernel, false);
    }

    // Run matrix A reduction kernel only if _b_offset is not equal to 0
    if(_b_offset != 0)
    {
        CLScheduler::get().enqueue(_mtx_a_reduction_kernel, false);
    }

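    // Only the last kernel below is enqueued with flush = true, so the command queue
    // is flushed once per run() call rather than after every kernel.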
    if(_fuse_output_stage)
    {
        // Run offset contribution/output stage kernel
        CLScheduler::get().enqueue(_offset_contribution_output_stage_kernel, true);
    }
    else
    {
        // Run offset contribution kernel
        CLScheduler::get().enqueue(_offset_contribution_kernel, true);
    }
}

void CLGEMMLowpMatrixMultiplyCore::prepare()
{
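    // One-time work: when B is constant across runs (reshape_b_only_on_first_run),
    // reshape it and precompute its column sums here so run() can skip both steps.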
    if(!_is_prepared)
    {
        if(_is_gemm_reshaped && _reshape_b_only_on_first_run)
        {
            ARM_COMPUTE_ERROR_ON(!_original_b->is_used());

            // Run reshape kernel and mark original weights tensor as unused
            _tmp_b.allocator()->allocate();
            CLScheduler::get().enqueue(_mtx_b_reshape_kernel, false);
            _original_b->mark_as_unused();
        }

        // Run matrix B reduction kernel only if _a_offset is not equal to 0
        if(_a_offset != 0 && _reshape_b_only_on_first_run)
        {
            _vector_sum_col.allocator()->allocate();
            CLScheduler::get().enqueue(_mtx_b_reduction_kernel, false);
        }

        CLScheduler::get().queue().finish();
        _is_prepared = true;
    }
}
} // namespace arm_compute