blob: 4acb316a3c5494414c9c76fc85defe695e47134a [file] [log] [blame]
SiCongLi282f3242020-11-24 15:24:16 +00001/*
Gian Marco Iodice60ab4e62023-04-28 10:40:07 +01002 * Copyright (c) 2020-2021, 2023 Arm Limited.
SiCongLi282f3242020-11-24 15:24:16 +00003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef ARM_COMPUTE_CL /* Needed by Utils.cpp to handle OpenCL exceptions properly */
25#error "This example needs to be built with -DARM_COMPUTE_CL"
26#endif /* ARM_COMPUTE_CL */
27
SiCongLi282f3242020-11-24 15:24:16 +000028#include "arm_compute/core/Helpers.h"
29#include "arm_compute/core/KernelDescriptors.h"
SiCongLi282f3242020-11-24 15:24:16 +000030#include "arm_compute/core/Types.h"
31#include "arm_compute/core/utils/misc/ShapeCalculator.h"
32#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
33#include "arm_compute/runtime/CL/CLScheduler.h"
34#include "arm_compute/runtime/CL/CLTuner.h"
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +010035
Georgios Pinitas7891a732021-08-20 21:39:25 +010036#include "src/gpu/cl/kernels/ClGemmLowpMatrixMultiplyReshapedOnlyRhsKernel.h"
37#include "src/gpu/cl/kernels/ClGemmLowpReductionKernel.h"
SiCongLi282f3242020-11-24 15:24:16 +000038#include "tests/CL/Helper.h"
SiCongLi282f3242020-11-24 15:24:16 +000039#include "utils/command_line/CommandLineOptions.h"
40#include "utils/command_line/CommandLineParser.h"
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +010041#include "utils/Utils.h"
SiCongLi282f3242020-11-24 15:24:16 +000042
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +010043#include "CommonGemmExampleOptions.h"
44#include "GemmTunerHelpers.h"
SiCongLi282f3242020-11-24 15:24:16 +000045#include <cstdlib>
46#include <memory>
47
48using namespace arm_compute;
49using namespace utils;
Georgios Pinitas4a578b92021-06-25 12:13:49 +010050using namespace arm_compute::opencl::kernels;
SiCongLi282f3242020-11-24 15:24:16 +000051using namespace arm_compute::misc::shape_calculator;
52using namespace gemm_tuner;
53
54namespace
55{
/** Structure holding all tunable gemm configs specific to this example/strategy */
struct GemmConfigs
{
    size_t m0 = 4; /**< Number of rows processed by the matrix multiplication */
    size_t n0 = 4; /**< Number of columns processed by the matrix multiplication */
    size_t k0 = 4; /**< Number of partial accumulations performed by the matrix multiplication */
    size_t h0 = 1; /**< Number of horizontal blocks of size (k0xn0) stored on the same output row */

    bool interleave_rhs = true; /**< Interleave rhs matrix */
    bool transpose_rhs  = true; /**< Transpose rhs matrix */
};
66
67/** Formatted output of the GemmConfigs type
68 *
69 * @param[out] os Output stream.
70 * @param[in] configs Tunable configurations to output
71 *
72 * @return Modified output stream.
73 */
74::std::ostream &operator<<(::std::ostream &os, const GemmConfigs &configs)
75{
76 std::string false_str = std::string("false");
77 std::string true_str = std::string("true");
78
79 os << "m0 : " << configs.m0 << std::endl;
80 os << "n0 : " << configs.n0 << std::endl;
81 os << "k0 : " << configs.k0 << std::endl;
82 os << "h0 : " << configs.h0 << std::endl;
83 os << "interleave_rhs : " << (configs.interleave_rhs ? true_str : false_str) << std::endl;
84 os << "transpose_rhs : " << (configs.transpose_rhs ? true_str : false_str) << std::endl;
85 return os;
86}
87
/** Command line options for gemm configs
 *
 * Registers one positional option per tunable config. NOTE: the registration
 * order in the constructor defines the positional order on the command line
 * (m0 n0 k0 h0 interleave_rhs transpose_rhs), so it must not be changed
 * independently of the documentation/help text.
 */
class GemmConfigOptions
{
public:
    /** Constructor
     *
     * @param[in,out] parser A parser on which "parse()" hasn't been called yet.
     */
    GemmConfigOptions(CommandLineParser &parser)
        // Initializer order matches member declaration order below; each option
        // defaults to the corresponding GemmConfigs default value.
        : m0(parser.add_positional_option<SimpleOption<size_t>>("m0", 4)),
          n0(parser.add_positional_option<SimpleOption<size_t>>("n0", 4)),
          k0(parser.add_positional_option<SimpleOption<size_t>>("k0", 4)),
          h0(parser.add_positional_option<SimpleOption<size_t>>("h0", 1)),
          interleave_rhs(parser.add_positional_option<SimpleOption<size_t>>("interleave_rhs", 1)),
          transpose_rhs(parser.add_positional_option<SimpleOption<size_t>>("transpose_rhs", 1))
    {
        m0->set_help("Number of rows processed by the matrix multiplication");
        n0->set_help("Number of columns processed by the matrix multiplication");
        k0->set_help("Number of partial accumulations performed by the matrix multiplication");
        h0->set_help("Number of horizontal blocks of size (k0xn0) stored on the same output row");
        interleave_rhs->set_help("Interleave rhs matrix (1) / Do not interleave rhs matrix (0)");
        transpose_rhs->set_help("Transpose rhs matrix (1) / Do not transpose rhs matrix (0)");
    }
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    GemmConfigOptions(const GemmConfigOptions &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    GemmConfigOptions &operator=(const GemmConfigOptions &) = delete;
    /** Allow instances of this class to be moved */
    GemmConfigOptions(GemmConfigOptions &&) = default;
    /** Allow instances of this class to be moved */
    GemmConfigOptions &operator=(GemmConfigOptions &&) = default;
    /** Default destructor */
    ~GemmConfigOptions() = default;

    // The parser owns the option objects; these are non-owning observers.
    SimpleOption<size_t> *m0;             /**< Number of rows processed by the matrix multiplication option */
    SimpleOption<size_t> *n0;             /**< Number of columns processed by the matrix multiplication option */
    SimpleOption<size_t> *k0;             /**< Number of partial accumulations performed by the matrix multiplication option */
    SimpleOption<size_t> *h0;             /**< Number of horizontal blocks of size (k0xn0) stored on the same output row option */
    SimpleOption<size_t> *interleave_rhs; /**< Interleave rhs matrix option (1 enable; 0 disable) */
    SimpleOption<size_t> *transpose_rhs;  /**< Transpose rhs matrix option (1 enable; 0 disable) */
};
129
130/** Consumes the gemm configuration options and creates a structure containing all information
131 *
132 * @param[in] options Options to consume
133 *
134 * @return Structure containing the gemm configurations
135 */
136GemmConfigs consume_gemm_configs(const GemmConfigOptions &options)
137{
138 GemmConfigs configs;
139 configs.m0 = options.m0->value();
140 configs.n0 = options.n0->value();
141 configs.k0 = options.k0->value();
142 configs.h0 = options.h0->value();
143 configs.interleave_rhs = options.interleave_rhs->value() != 0;
144 configs.transpose_rhs = options.transpose_rhs->value() != 0;
145 return configs;
146}
147
148} // namespace
149
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100150using ClGemmLowpMatrixMultiplyReshapedOnlyRhs =
151 test::CLSynthetizeOperator<ClGemmLowpMatrixMultiplyReshapedOnlyRhsKernel>;
152using ClGemmLowpMatrixAReduction = test::CLSynthetizeOperator<ClGemmLowpMatrixAReductionKernel>;
SiCongLi282f3242020-11-24 15:24:16 +0000153
154class CLGEMMLowpMatrixMultiplyReshapedOnlyRHSFusedOutputStageFixedpointExample : public Example
155{
156public:
157 bool do_setup(int argc, char **argv) override
158 {
159 // Default parameters
160 CommonGemmExampleParams params;
161 GemmConfigs configs;
162
163 // Parse command line options
164 CommandLineParser parser;
SiCong Li98e33b92020-12-03 14:52:53 +0000165 CommonGemmExampleOptions param_options(parser, DataType::QASYMM8);
SiCongLi282f3242020-11-24 15:24:16 +0000166 GemmConfigOptions config_options(parser);
167
168 parser.parse(argc, argv);
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100169 if (param_options.help->is_set() && param_options.help->value())
SiCongLi282f3242020-11-24 15:24:16 +0000170 {
171 parser.print_help(argv[0]);
172 return false;
173 }
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100174 if (!parser.validate())
SiCongLi282f3242020-11-24 15:24:16 +0000175 {
176 // Invalid arguments. Use default parameters and configs
177 std::cerr << "Invalid arguments." << std::endl;
178 parser.print_help(argv[0]);
179 std::cerr << "Falling back to default parameters and configs" << std::endl;
180 }
181 else
182 {
183 params = consume_common_gemm_example_parameters(param_options);
184 configs = consume_gemm_configs(config_options);
185 }
186
187 std::cout << "Gemm parameters:" << std::endl;
188 std::cout << params << std::endl;
189 std::cout << "Gemm configurations:" << std::endl;
190 std::cout << configs << std::endl;
191
Gian Marco Iodiceca419dd2021-03-03 17:25:07 +0000192 tuner.set_tuner_mode(params.tuner_mode);
193
SiCongLi282f3242020-11-24 15:24:16 +0000194 CLScheduler::get().default_init(&tuner);
195
196 lhs.allocator()->init(TensorInfo(TensorShape(params.K, params.M, params.B), 1, params.data_type));
197 rhs.allocator()->init(TensorInfo(TensorShape(params.N, params.K, params.B), 1, params.data_type));
SiCongLieda87d42021-03-04 10:27:03 +0000198 bias.allocator()->init(TensorInfo(TensorShape(params.N), 1, DataType::S32));
SiCongLi282f3242020-11-24 15:24:16 +0000199 dst.allocator()->init(TensorInfo(TensorShape(params.N, params.M, params.B), 1, params.data_type));
200
201 // Set arbitrary quantization information (non-zero offset to ensure offset contribution stage is included)
202 // Could be extended in the future to include a user-controlled option for offset == 0
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100203 const QuantizationInfo q_info{0.012, 3};
SiCong Li98e33b92020-12-03 14:52:53 +0000204 lhs.info()->set_quantization_info(q_info);
205 rhs.info()->set_quantization_info(q_info);
206 bias.info()->set_quantization_info(q_info);
207 dst.info()->set_quantization_info(q_info);
SiCongLi282f3242020-11-24 15:24:16 +0000208
209 GEMMLHSMatrixInfo lhs_info;
210 lhs_info.m0 = configs.m0;
211 lhs_info.k0 = configs.k0;
212
213 GEMMRHSMatrixInfo rhs_info;
214 rhs_info.n0 = configs.n0;
215 rhs_info.k0 = configs.k0;
216 rhs_info.h0 = configs.h0;
217 rhs_info.interleave = configs.interleave_rhs;
218 rhs_info.transpose = configs.transpose_rhs;
219 rhs_info.export_to_cl_image = false; // CL image not supported for quantized cases yet
220
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100221 if (rhs_info.h0 == 0)
Gian Marco Iodice60ab4e62023-04-28 10:40:07 +0100222 {
223 rhs_info.h0 = std::max(static_cast<unsigned int>(params.N) / rhs_info.n0, 1U);
224 }
225
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100226 rhs_reshaped.allocator()->init(
227 TensorInfo(compute_rhs_reshaped_shape(*rhs.info(), rhs_info), 1, params.data_type));
SiCong Li98e33b92020-12-03 14:52:53 +0000228 rhs_reshaped.info()->set_quantization_info(q_info);
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100229 if (rhs_info.export_to_cl_image)
SiCongLi282f3242020-11-24 15:24:16 +0000230 {
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100231 if (!examples::gemm_tuner_helpers::update_padding_for_cl_image(rhs_reshaped.info()))
Manuel Bottini7b427862021-02-08 13:45:19 +0000232 {
233 std::cerr << "cl_image is not supported on the device, disable export_to_cl_image" << std::endl;
234 return false;
235 }
SiCongLi282f3242020-11-24 15:24:16 +0000236 }
237
238 // Configure output stage for quantized case
239 GEMMLowpOutputStageInfo gemmlowp_output_stage;
240 gemmlowp_output_stage.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
241 gemmlowp_output_stage.output_data_type = dst.info()->data_type();
242 gemmlowp_output_stage.gemmlowp_offset = 0;
243 {
SiCongLi282f3242020-11-24 15:24:16 +0000244 gemmlowp_output_stage.is_quantized_per_channel = false;
245 // Num_filters is 1 unless quantized type is of per_channel type. Could be extended in the future to support per-channel quantization.
246 const unsigned int num_filters = 1;
247
248 dst_multipliers.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));
249 dst_shifts.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));
250
251 gemmlowp_output_stage.gemmlowp_multipliers.resize(num_filters);
252 gemmlowp_output_stage.gemmlowp_shifts.resize(num_filters);
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100253 quantization::compute_quantized_multipliers_and_shifts(lhs.info(), rhs.info(), dst.info(),
SiCongLi282f3242020-11-24 15:24:16 +0000254 gemmlowp_output_stage.gemmlowp_multipliers.data(),
255 gemmlowp_output_stage.gemmlowp_shifts.data());
256 gemmlowp_output_stage.gemmlowp_multiplier = gemmlowp_output_stage.gemmlowp_multipliers[0];
257 gemmlowp_output_stage.gemmlowp_shift = gemmlowp_output_stage.gemmlowp_shifts[0];
258
259 // No fused activation
260 PixelValue min_val{};
261 PixelValue max_val{};
262 std::tie(min_val, max_val) = get_min_max(dst.info()->data_type());
263
264 auto min_activation = min_val.get<int32_t>();
265 auto max_activation = max_val.get<int32_t>();
266
267 // Set the GEMMLowp output stage info
268 gemmlowp_output_stage.gemmlowp_offset = dst.info()->quantization_info().uniform().offset;
269 gemmlowp_output_stage.gemmlowp_min_bound = min_activation;
270 gemmlowp_output_stage.gemmlowp_max_bound = max_activation;
271 }
272
273 GEMMKernelInfo gemm_info;
274 gemm_info.m = params.M;
275 gemm_info.n = params.N;
276 gemm_info.k = params.K;
277 gemm_info.depth_output_gemm3d = 0;
278 gemm_info.reinterpret_input_as_3d = false;
279 gemm_info.broadcast_bias = true;
280 gemm_info.fp_mixed_precision = false;
281 gemm_info.has_pad_y = false;
282 gemm_info.mult_transpose1xW_width = configs.h0;
283 gemm_info.lhs_info = lhs_info;
284 gemm_info.rhs_info = rhs_info;
285 gemm_info.a_offset = lhs.info()->quantization_info().uniform().offset;
286 gemm_info.b_offset = rhs.info()->quantization_info().uniform().offset;
287 gemm_info.output_stage = gemmlowp_output_stage;
288
289 // Initialize Matrix A reduction kernel only if _b_offset is not equal to 0
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100290 if (gemm_info.b_offset != 0)
SiCongLi282f3242020-11-24 15:24:16 +0000291 {
292 const TensorInfo info_vector_sum_row(compute_reductionB_shape(*lhs.info()), 1, DataType::S32);
293 vector_sum_row.allocator()->init(info_vector_sum_row);
294
Georgios Pinitas4a578b92021-06-25 12:13:49 +0100295 mtx_a_reduction = std::make_unique<ClGemmLowpMatrixAReduction>();
SiCongLi282f3242020-11-24 15:24:16 +0000296
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100297 if (!mtx_a_reduction->validate(lhs.info(), vector_sum_row.info(), GEMMLowpReductionKernelInfo{}))
SiCongLi282f3242020-11-24 15:24:16 +0000298 {
299 std::cerr << "Invalid arguments for CLGEMMLowpMatrixAReductionKernel." << std::endl;
300 return false;
301 }
302
Georgios Pinitas4a578b92021-06-25 12:13:49 +0100303 mtx_a_reduction->configure(lhs.info(), vector_sum_row.info(), GEMMLowpReductionKernelInfo{});
SiCongLi282f3242020-11-24 15:24:16 +0000304 }
305 // Initialize matrix B reduction kernel only if _a_offset is not equal to 0
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100306 if (gemm_info.a_offset != 0)
SiCongLi282f3242020-11-24 15:24:16 +0000307 {
308 const TensorInfo info_vector_sum_col(compute_reductionA_shape(*rhs.info()), 1, DataType::S32);
309 vector_sum_col.allocator()->init(info_vector_sum_col);
310 // There's no need for a Matrix B reduction kernel as this is assumed to be run only once in the prepare stage
311 }
312
313 // Validate argments
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100314 if (!gemm.validate(lhs.info(), rhs_reshaped.info(), dst.info(), gemm_info,
315 gemm_info.a_offset == 0 ? nullptr : vector_sum_col.info(),
316 gemm_info.b_offset == 0 ? nullptr : vector_sum_row.info(), bias.info(),
317 dst_multipliers.info(), dst_shifts.info()))
SiCongLi282f3242020-11-24 15:24:16 +0000318 {
Georgios Pinitas4a578b92021-06-25 12:13:49 +0100319 std::cerr << "Invalid arguments for ClGemmLowpMatrixMultiplyReshapedOnlyRhsKernel." << std::endl;
SiCongLi282f3242020-11-24 15:24:16 +0000320 return false;
321 }
322
323 // Configure function
Georgios Pinitas4a578b92021-06-25 12:13:49 +0100324 gemm.configure(lhs.info(), rhs_reshaped.info(), dst.info(), gemm_info,
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100325 gemm_info.a_offset == 0 ? nullptr : vector_sum_col.info(),
326 gemm_info.b_offset == 0 ? nullptr : vector_sum_row.info(), bias.info(), dst_multipliers.info(),
327 dst_shifts.info());
SiCongLi282f3242020-11-24 15:24:16 +0000328
329 // Allocate tensors
330 lhs.allocator()->allocate();
331 rhs.allocator()->allocate();
332 rhs_reshaped.allocator()->allocate();
333 bias.allocator()->allocate();
334 dst.allocator()->allocate();
335 vector_sum_col.allocator()->allocate();
336 vector_sum_row.allocator()->allocate();
337 dst_multipliers.allocator()->allocate();
338 dst_shifts.allocator()->allocate();
339
340 return true;
341 }
342 void do_run() override
343 {
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100344 if (mtx_a_reduction != nullptr)
SiCongLi282f3242020-11-24 15:24:16 +0000345 {
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100346 ITensorPack red_pack({{ACL_SRC, &lhs}, {ACL_DST, &dst}});
Georgios Pinitas4a578b92021-06-25 12:13:49 +0100347 mtx_a_reduction->run(red_pack);
SiCongLi282f3242020-11-24 15:24:16 +0000348 }
Georgios Pinitas4a578b92021-06-25 12:13:49 +0100349
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100350 ITensorPack gemm_pack({{ACL_SRC_0, &lhs},
351 {ACL_SRC_1, &rhs},
352 {ACL_BIAS, &bias},
353 {ACL_VEC_COL_SUM, &vector_sum_col},
354 {ACL_VEC_ROW_SUM, &vector_sum_row},
355 {ACL_SHIFTS, &dst_shifts},
356 {ACL_MULTIPLIERS, &dst_multipliers},
357 {ACL_DST, &dst}});
Georgios Pinitas4a578b92021-06-25 12:13:49 +0100358 gemm.run(gemm_pack);
SiCongLi282f3242020-11-24 15:24:16 +0000359
360 // Make sure all the OpenCL jobs are done executing:
361 CLScheduler::get().sync();
362 }
363
364 void do_teardown() override
365 {
366 }
367
368private:
369 CLTensor lhs{};
370 CLTensor rhs{};
371 CLTensor rhs_reshaped{};
372 CLTensor bias{};
373 CLTensor dst{};
374 CLTensor vector_sum_col{};
375 CLTensor vector_sum_row{};
376 CLTensor dst_multipliers{};
377 CLTensor dst_shifts{};
378 CLTuner tuner{};
Georgios Pinitas4a578b92021-06-25 12:13:49 +0100379 ClGemmLowpMatrixMultiplyReshapedOnlyRhs gemm{};
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100380 std::unique_ptr<ClGemmLowpMatrixAReduction> mtx_a_reduction{nullptr};
SiCongLi282f3242020-11-24 15:24:16 +0000381};
382
/** Main test program for gemmlowp reshaped rhs only with fused output stage fixedpoint
 *
 * @param[in] argc Number of arguments
 * @param[in] argv Arguments ( [optional] M, [optional] N, [optional] K, [optional] B, [optional] m0, [optional] n0, [optional] k0, [optional] h0, [optional] interleave_rhs, [optional] transpose_rhs )
 */
int main(int argc, char **argv)
{
    // run_example drives do_setup/do_run/do_teardown and handles OpenCL exceptions
    // (see the ARM_COMPUTE_CL guard at the top of this file)
    return run_example<CLGEMMLowpMatrixMultiplyReshapedOnlyRHSFusedOutputStageFixedpointExample>(argc, argv);
}