blob: a990ef1dee25879fba6f101eb995aa1cd3b8a76c [file] [log] [blame]
SiCongLi282f3242020-11-24 15:24:16 +00001/*
2 * Copyright (c) 2020 Arm Limited.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef ARM_COMPUTE_CL /* Needed by Utils.cpp to handle OpenCL exceptions properly */
25#error "This example needs to be built with -DARM_COMPUTE_CL"
26#endif /* ARM_COMPUTE_CL */
27
28#include "CommonGemmExampleOptions.h"
29#include "GemmTunerHelpers.h"
30#include "arm_compute/core/Helpers.h"
31#include "arm_compute/core/KernelDescriptors.h"
32#include "arm_compute/core/KernelDescriptors.h"
33#include "arm_compute/core/Types.h"
34#include "arm_compute/core/utils/misc/ShapeCalculator.h"
35#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
36#include "arm_compute/runtime/CL/CLScheduler.h"
37#include "arm_compute/runtime/CL/CLTuner.h"
38#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
39#include "src/core/CL/kernels/CLGEMMLowpReductionKernel.h"
40#include "tests/CL/Helper.h"
41#include "utils/Utils.h"
42#include "utils/command_line/CommandLineOptions.h"
43#include "utils/command_line/CommandLineParser.h"
44
45#include <cstdlib>
46#include <memory>
47
48using namespace arm_compute;
49using namespace utils;
50using namespace arm_compute::misc::shape_calculator;
51using namespace gemm_tuner;
52
53namespace
54{
/** All tunable GEMM configuration parameters specific to this example/strategy */
struct GemmConfigs
{
    size_t m0{ 4 };                /**< Rows processed per matrix multiplication block */
    size_t n0{ 4 };                /**< Columns processed per matrix multiplication block */
    size_t k0{ 4 };                /**< Partial accumulations performed per matrix multiplication block */
    size_t h0{ 1 };                /**< Horizontal blocks of size (k0xn0) stored on the same output row */
    bool   interleave_rhs{ true }; /**< Whether the rhs matrix is interleaved */
    bool   transpose_rhs{ true };  /**< Whether the rhs matrix is transposed */
};
65
66/** Formatted output of the GemmConfigs type
67 *
68 * @param[out] os Output stream.
69 * @param[in] configs Tunable configurations to output
70 *
71 * @return Modified output stream.
72 */
73::std::ostream &operator<<(::std::ostream &os, const GemmConfigs &configs)
74{
75 std::string false_str = std::string("false");
76 std::string true_str = std::string("true");
77
78 os << "m0 : " << configs.m0 << std::endl;
79 os << "n0 : " << configs.n0 << std::endl;
80 os << "k0 : " << configs.k0 << std::endl;
81 os << "h0 : " << configs.h0 << std::endl;
82 os << "interleave_rhs : " << (configs.interleave_rhs ? true_str : false_str) << std::endl;
83 os << "transpose_rhs : " << (configs.transpose_rhs ? true_str : false_str) << std::endl;
84 return os;
85}
86
87/** Command line options for gemm configs */
88class GemmConfigOptions
89{
90public:
91 /** Constructor
92 *
93 * @param[in,out] parser A parser on which "parse()" hasn't been called yet.
94 */
95 GemmConfigOptions(CommandLineParser &parser)
96 : m0(parser.add_positional_option<SimpleOption<size_t>>("m0", 4)),
97 n0(parser.add_positional_option<SimpleOption<size_t>>("n0", 4)),
98 k0(parser.add_positional_option<SimpleOption<size_t>>("k0", 4)),
99 h0(parser.add_positional_option<SimpleOption<size_t>>("h0", 1)),
100 interleave_rhs(parser.add_positional_option<SimpleOption<size_t>>("interleave_rhs", 1)),
101 transpose_rhs(parser.add_positional_option<SimpleOption<size_t>>("transpose_rhs", 1))
102 {
103 m0->set_help("Number of rows processed by the matrix multiplication");
104 n0->set_help("Number of columns processed by the matrix multiplication");
105 k0->set_help("Number of partial accumulations performed by the matrix multiplication");
106 h0->set_help("Number of horizontal blocks of size (k0xn0) stored on the same output row");
107 interleave_rhs->set_help("Interleave rhs matrix (1) / Do not interleave rhs matrix (0)");
108 transpose_rhs->set_help("Transpose rhs matrix (1) / Do not transpose rhs matrix (0)");
109 }
110 /** Prevent instances of this class from being copied (As this class contains pointers) */
111 GemmConfigOptions(const GemmConfigOptions &) = delete;
112 /** Prevent instances of this class from being copied (As this class contains pointers) */
113 GemmConfigOptions &operator=(const GemmConfigOptions &) = delete;
114 /** Allow instances of this class to be moved */
115 GemmConfigOptions(GemmConfigOptions &&) = default;
116 /** Allow instances of this class to be moved */
117 GemmConfigOptions &operator=(GemmConfigOptions &&) = default;
118 /** Default destructor */
119 ~GemmConfigOptions() = default;
120
121 SimpleOption<size_t> *m0; /**< Number of rows processed by the matrix multiplication option */
122 SimpleOption<size_t> *n0; /**< Number of columns processed by the matrix multiplication option */
123 SimpleOption<size_t> *k0; /**< Number of partial accumulations performed by the matrix multiplication option */
124 SimpleOption<size_t> *h0; /**< Number of horizontal blocks of size (k0xn0) stored on the same output row option */
125 SimpleOption<size_t> *interleave_rhs; /**< Interleave rhs matrix option (1 enable; 0 disable) */
126 SimpleOption<size_t> *transpose_rhs; /**< Transpose rhs matrix option (1 enable; 0 disable) */
127};
128
129/** Consumes the gemm configuration options and creates a structure containing all information
130 *
131 * @param[in] options Options to consume
132 *
133 * @return Structure containing the gemm configurations
134 */
135GemmConfigs consume_gemm_configs(const GemmConfigOptions &options)
136{
137 GemmConfigs configs;
138 configs.m0 = options.m0->value();
139 configs.n0 = options.n0->value();
140 configs.k0 = options.k0->value();
141 configs.h0 = options.h0->value();
142 configs.interleave_rhs = options.interleave_rhs->value() != 0;
143 configs.transpose_rhs = options.transpose_rhs->value() != 0;
144 return configs;
145}
146
147} // namespace
148
// Wrap the kernels under test in runnable functions via the test-harness synthetizer
using CLGEMMLowpMatrixMultiplyReshapedOnlyRHS = test::CLSynthetizeFunction<CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel>;
using CLGEMMLowpMatrixAReduction = test::CLSynthetizeFunction<CLGEMMLowpMatrixAReductionKernel>;
151
/** Tuner example exercising CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel with a fused
 * fixed-point (QUANTIZE_DOWN_FIXEDPOINT) output stage on quantized (QASYMM8) inputs.
 */
class CLGEMMLowpMatrixMultiplyReshapedOnlyRHSFusedOutputStageFixedpointExample : public Example
{
public:
    /** Parse the command line, configure and validate the kernels, and allocate all tensors.
     *
     * @param[in] argc Number of command line arguments
     * @param[in] argv Command line arguments
     *
     * @return True on success; false if help was requested or a kernel rejected its arguments
     */
    bool do_setup(int argc, char **argv) override
    {
        // Default parameters
        CommonGemmExampleParams params;
        GemmConfigs configs;

        // Parse command line options
        CommandLineParser parser;
        CommonGemmExampleOptions param_options(parser, DataType::QASYMM8);
        GemmConfigOptions config_options(parser);

        parser.parse(argc, argv);
        if(param_options.help->is_set() && param_options.help->value())
        {
            // Help requested: print usage and exit without running the example
            parser.print_help(argv[0]);
            return false;
        }
        if(!parser.validate())
        {
            // Invalid arguments. Use default parameters and configs
            std::cerr << "Invalid arguments." << std::endl;
            parser.print_help(argv[0]);
            std::cerr << "Falling back to default parameters and configs" << std::endl;
        }
        else
        {
            params = consume_common_gemm_example_parameters(param_options);
            configs = consume_gemm_configs(config_options);
        }

        // Echo the effective parameters/configs so a tuning run is reproducible
        std::cout << "Gemm parameters:" << std::endl;
        std::cout << params << std::endl;
        std::cout << "Gemm configurations:" << std::endl;
        std::cout << configs << std::endl;

        // Attach the CLTuner to the scheduler so kernel configurations can be tuned while running
        CLScheduler::get().default_init(&tuner);

        // Tensor shapes are (width, height, batch): lhs is MxK, rhs is KxN, dst is MxN
        lhs.allocator()->init(TensorInfo(TensorShape(params.K, params.M, params.B), 1, params.data_type));
        rhs.allocator()->init(TensorInfo(TensorShape(params.N, params.K, params.B), 1, params.data_type));
        bias.allocator()->init(TensorInfo(TensorShape(params.N, 1, params.B), 1, DataType::S32));
        dst.allocator()->init(TensorInfo(TensorShape(params.N, params.M, params.B), 1, params.data_type));

        // Set arbitrary quantization information (non-zero offset to ensure offset contribution stage is included)
        // Could be extended in the future to include a user-controlled option for offset == 0
        const QuantizationInfo q_info
        {
            0.012, 3
        };
        lhs.info()->set_quantization_info(q_info);
        rhs.info()->set_quantization_info(q_info);
        bias.info()->set_quantization_info(q_info);
        dst.info()->set_quantization_info(q_info);

        GEMMLHSMatrixInfo lhs_info;
        lhs_info.m0 = configs.m0;
        lhs_info.k0 = configs.k0;

        GEMMRHSMatrixInfo rhs_info;
        rhs_info.n0 = configs.n0;
        rhs_info.k0 = configs.k0;
        rhs_info.h0 = configs.h0;
        rhs_info.interleave = configs.interleave_rhs;
        rhs_info.transpose = configs.transpose_rhs;
        rhs_info.export_to_cl_image = false; // CL image not supported for quantized cases yet

        // The GEMM kernel consumes only the reshaped copy of rhs
        rhs_reshaped.allocator()->init(TensorInfo(compute_rhs_reshaped_shape(*rhs.info(), rhs_info), 1, params.data_type));
        rhs_reshaped.info()->set_quantization_info(q_info);
        if(rhs_info.export_to_cl_image)
        {
            examples::gemm_tuner_helpers::update_padding_for_cl_image(rhs_reshaped.info());
        }

        // Configure output stage for quantized case
        GEMMLowpOutputStageInfo gemmlowp_output_stage;
        gemmlowp_output_stage.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
        gemmlowp_output_stage.output_data_type = dst.info()->data_type();
        gemmlowp_output_stage.gemmlowp_offset = 0;
        {
            const int idx_kernels = get_data_layout_dimension_index(lhs.info()->data_layout(), DataLayoutDimension::BATCHES);
            gemmlowp_output_stage.is_quantized_per_channel = false;
            // Num_filters is 1 unless quantized type is of per_channel type. Could be extended in the future to support per-channel quantization.
            const unsigned int num_filters = 1;

            dst_multipliers.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));
            dst_shifts.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));

            gemmlowp_output_stage.gemmlowp_multipliers.resize(num_filters);
            gemmlowp_output_stage.gemmlowp_shifts.resize(num_filters);
            // Derive the fixed-point requantization multipliers/shifts from the lhs/rhs/dst quantization infos
            quantization::compute_quantized_multipliers_and_shifts(lhs.info(),
                                                                   rhs.info(),
                                                                   dst.info(),
                                                                   idx_kernels,
                                                                   gemmlowp_output_stage.gemmlowp_multipliers.data(),
                                                                   gemmlowp_output_stage.gemmlowp_shifts.data());
            gemmlowp_output_stage.gemmlowp_multiplier = gemmlowp_output_stage.gemmlowp_multipliers[0];
            gemmlowp_output_stage.gemmlowp_shift = gemmlowp_output_stage.gemmlowp_shifts[0];

            // No fused activation: bounds are simply the full range of the output data type
            PixelValue min_val{};
            PixelValue max_val{};
            std::tie(min_val, max_val) = get_min_max(dst.info()->data_type());

            auto min_activation = min_val.get<int32_t>();
            auto max_activation = max_val.get<int32_t>();

            // Set the GEMMLowp output stage info
            gemmlowp_output_stage.gemmlowp_offset = dst.info()->quantization_info().uniform().offset;
            gemmlowp_output_stage.gemmlowp_min_bound = min_activation;
            gemmlowp_output_stage.gemmlowp_max_bound = max_activation;
        }

        GEMMKernelInfo gemm_info;
        gemm_info.m = params.M;
        gemm_info.n = params.N;
        gemm_info.k = params.K;
        gemm_info.depth_output_gemm3d = 0;
        gemm_info.reinterpret_input_as_3d = false;
        gemm_info.broadcast_bias = true;
        gemm_info.fp_mixed_precision = false;
        gemm_info.has_pad_y = false;
        gemm_info.mult_transpose1xW_width = configs.h0; // NOTE(review): h0 is reused here — confirm this field is meant to mirror rhs_info.h0
        gemm_info.lhs_info = lhs_info;
        gemm_info.rhs_info = rhs_info;
        gemm_info.a_offset = lhs.info()->quantization_info().uniform().offset;
        gemm_info.b_offset = rhs.info()->quantization_info().uniform().offset;
        gemm_info.output_stage = gemmlowp_output_stage;

        // Initialize Matrix A reduction kernel only if _b_offset is not equal to 0
        if(gemm_info.b_offset != 0)
        {
            const TensorInfo info_vector_sum_row(compute_reductionB_shape(*lhs.info()), 1, DataType::S32);
            vector_sum_row.allocator()->init(info_vector_sum_row);

            mtx_a_reduction = std::make_unique<CLGEMMLowpMatrixAReduction>();

            if(!mtx_a_reduction->validate(lhs.info(), vector_sum_row.info(), GEMMLowpReductionKernelInfo{}))
            {
                std::cerr << "Invalid arguments for CLGEMMLowpMatrixAReductionKernel." << std::endl;
                return false;
            }

            mtx_a_reduction->configure(&lhs, &vector_sum_row, GEMMLowpReductionKernelInfo{});
        }
        // Initialize matrix B reduction kernel only if _a_offset is not equal to 0
        if(gemm_info.a_offset != 0)
        {
            const TensorInfo info_vector_sum_col(compute_reductionA_shape(*rhs.info()), 1, DataType::S32);
            vector_sum_col.allocator()->init(info_vector_sum_col);
            // There's no need for a Matrix B reduction kernel as this is assumed to be run only once in the prepare stage
        }

        // Validate arguments
        if(!gemm.validate(lhs.info(), rhs_reshaped.info(), dst.info(), gemm_info, gemm_info.a_offset == 0 ? nullptr : vector_sum_col.info(),
                          gemm_info.b_offset == 0 ? nullptr : vector_sum_row.info(), bias.info(), dst_multipliers.info(), dst_shifts.info()))
        {
            std::cerr << "Invalid arguments for CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel." << std::endl;
            return false;
        }

        // Configure function
        gemm.configure(&lhs, &rhs_reshaped, &dst, gemm_info, gemm_info.a_offset == 0 ? nullptr : &vector_sum_col, gemm_info.b_offset == 0 ? nullptr : &vector_sum_row, &bias, &dst_multipliers, &dst_shifts);

        // Allocate tensors
        // NOTE(review): vector_sum_col/vector_sum_row are allocated even when their infos were not
        // initialized above (offset == 0) — presumably harmless; confirm against CLTensorAllocator semantics
        lhs.allocator()->allocate();
        rhs.allocator()->allocate();
        rhs_reshaped.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();
        vector_sum_col.allocator()->allocate();
        vector_sum_row.allocator()->allocate();
        dst_multipliers.allocator()->allocate();
        dst_shifts.allocator()->allocate();

        return true;
    }
    /** Run the (optional) Matrix A reduction followed by the GEMM, then block until done */
    void do_run() override
    {
        if(mtx_a_reduction != nullptr)
        {
            mtx_a_reduction->run();
        }
        gemm.run();

        // Make sure all the OpenCL jobs are done executing:
        CLScheduler::get().sync();
    }

    /** Nothing explicit to tear down: tensors and functions release their resources on destruction */
    void do_teardown() override
    {
    }

private:
    CLTensor lhs{};             /**< Input matrix A: MxK per batch (shape K, M, B) */
    CLTensor rhs{};             /**< Input matrix B: KxN per batch, reshaped into rhs_reshaped before use */
    CLTensor rhs_reshaped{};    /**< Reshaped copy of rhs consumed by the GEMM kernel */
    CLTensor bias{};            /**< S32 bias of shape (N, 1, B), broadcast over rows */
    CLTensor dst{};             /**< Output matrix: MxN per batch */
    CLTensor vector_sum_col{};  /**< Reduction of rhs, only initialized when a_offset != 0 */
    CLTensor vector_sum_row{};  /**< Reduction of lhs, only initialized when b_offset != 0 */
    CLTensor dst_multipliers{}; /**< Fixed-point requantization multipliers for the output stage */
    CLTensor dst_shifts{};      /**< Fixed-point requantization shifts for the output stage */
    CLTuner tuner{};            /**< Tuner attached to the CL scheduler */
    CLGEMMLowpMatrixMultiplyReshapedOnlyRHS gemm{};                         /**< GEMM function under test */
    std::unique_ptr<CLGEMMLowpMatrixAReduction> mtx_a_reduction{ nullptr }; /**< Matrix A reduction, created only when b_offset != 0 */
};
360
/** Main test program for gemmlowp reshaped rhs only with fused output stage fixedpoint
 *
 * @param[in] argc Number of arguments
 * @param[in] argv Arguments ( [optional] M, [optional] N, [optional] K, [optional] B, [optional] m0, [optional] n0, [optional] k0, [optional] h0, [optional] interleave_rhs, [optional] transpose_rhs )
 *
 * @return Process exit code produced by run_example
 */
int main(int argc, char **argv)
{
    return run_example<CLGEMMLowpMatrixMultiplyReshapedOnlyRHSFusedOutputStageFixedpointExample>(argc, argv);
}