blob: 717ba77e1797bd4bf81a6f71de8c0f666eef8331 [file] [log] [blame]
/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24#ifndef ARM_COMPUTE_CL /* Needed by Utils.cpp to handle OpenCL exceptions properly */
25#error "This example needs to be built with -DARM_COMPUTE_CL"
26#endif /* ARM_COMPUTE_CL */
27
28#include "arm_compute/core/Types.h"
SiCong Lie357a252020-08-09 20:05:52 +010029#include "arm_compute/core/Utils.h"
Isabella Gottardi01a214a2018-04-09 16:00:52 +010030#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
Isabella Gottardi01a214a2018-04-09 16:00:52 +010031#include "arm_compute/runtime/CL/CLScheduler.h"
Sang-Hoon Parkbef7fa22020-10-21 15:58:54 +010032#include "arm_compute/runtime/CL/functions/CLGEMM.h"
33#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
34#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
35#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
36#include "src/core/CL/kernels/CLFillBorderKernel.h"
37#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
38#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
39#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h"
40#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h"
41#include "src/core/CL/kernels/CLGEMMLowpReductionKernel.h"
42#include "src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
43#include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h"
44#include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h"
45#include "src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h"
46#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
47#include "src/core/CL/kernels/CLIm2ColKernel.h"
48#include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
Isabella Gottardi01a214a2018-04-09 16:00:52 +010049#include "tests/AssetsLibrary.h"
50#include "tests/CL/CLAccessor.h"
51#include "tests/Globals.h"
52#include "tests/IAccessor.h"
53#include "tests/SimpleTensor.h"
54#include "tests/validation/Validation.h"
55#include "tests/validation/reference/GEMM.h"
56#include "tests/validation/reference/GEMMLowp.h"
57
Georgios Pinitas108a95e2019-03-27 13:55:59 +000058#include "utils/TypePrinter.h"
Isabella Gottardi01a214a2018-04-09 16:00:52 +010059#include "utils/Utils.h"
Georgios Pinitas108a95e2019-03-27 13:55:59 +000060#include "utils/command_line/CommandLineOptions.h"
61#include "utils/command_line/CommandLineParser.h"
62
63#include "ValidateExample.h"
Isabella Gottardi01a214a2018-04-09 16:00:52 +010064
65#include <cstdlib>
66
using namespace arm_compute;
using namespace utils;
using namespace arm_compute::test;
using namespace arm_compute::test::validation;

// Validation tolerances consumed by do_validate() for the floating point paths.
constexpr float abs_tolerance_f32(0.0001f); /**< F32 Absolute tolerance value for comparing reference's output against implementation's output for
                                             * floating point data types in case using relative tolerance fails because of small values */
RelativeTolerance<float> tolerance_f32(0.001f); /**< F32 Tolerance value for comparing reference's output against implementation's output for floating point data types */
RelativeTolerance<half_float::half> tolerance_f16(half(0.2)); /**< F16 Tolerance value for comparing reference's output against implementation's output for floating point data types */
constexpr float tolerance_num_f16 = 0.02f; /**< F16 Tolerance number (passed alongside tolerance_f16 to validate() — presumably the allowed fraction of out-of-tolerance elements; confirm against the validation framework) */
77
Georgios Pinitas108a95e2019-03-27 13:55:59 +000078namespace
79{
80class GEMMCommandLineOptions final
81{
82public:
83 explicit GEMMCommandLineOptions(CommandLineParser &parser) noexcept
84 : help(parser.add_option<ToggleOption>("help")),
85 add_bias(parser.add_option<ToggleOption>("add_bias")),
86 M(parser.add_option<SimpleOption<int>>("m", 7)),
87 N(parser.add_option<SimpleOption<int>>("n", 3)),
88 K(parser.add_option<SimpleOption<int>>("k", 5)),
89 B(parser.add_option<SimpleOption<int>>("b", 1)),
90 alpha(parser.add_option<SimpleOption<float>>("alpha", 1.f)),
91 beta(parser.add_option<SimpleOption<float>>("beta", 0.f)),
92 offset_src0(parser.add_option<SimpleOption<int>>("offset_i0", 10)),
93 offset_src1(parser.add_option<SimpleOption<int>>("offset_i1", 10)),
94 offset_dst(parser.add_option<SimpleOption<int>>("offset_o", 10)),
95 scale_src0(parser.add_option<SimpleOption<float>>("scale_i0", 1.f / 255)),
96 scale_src1(parser.add_option<SimpleOption<float>>("scale_i1", 1.f / 255)),
97 scale_dst(parser.add_option<SimpleOption<float>>("scale_o", 1.f / 255)),
98 data_type()
99 {
100 // Setup data type
101 const std::set<arm_compute::DataType> supported_data_types
102 {
103 DataType::F16,
104 DataType::F32,
105 DataType::QASYMM8,
106 };
107 data_type = parser.add_option<EnumOption<DataType>>("type", supported_data_types, DataType::F32);
108
109 // Setup help strings
110 help->set_help("Show this help message");
111 add_bias->set_help("Add bias to the GEMM. Used when running in QASYMM8");
112 M->set_help("M value");
113 N->set_help("N value");
114 K->set_help("K value");
115 B->set_help("B value - number of batches");
116 alpha->set_help("Alpha value");
117 beta->set_help("Beta value");
118 offset_src0->set_help("Offset of first input. Used when running in QASYMM8");
119 offset_src1->set_help("Offset of second input. Used when running in QASYMM8");
120 offset_dst->set_help("Offset of output. Used when running in QASYMM8");
121 scale_src0->set_help("Scale of first input. Used when running in QASYMM8");
122 scale_src1->set_help("Scale of second input. Used when running in QASYMM8");
123 scale_dst->set_help("Scale of output. Used when running in QASYMM8");
124 data_type->set_help("Data type to use");
125 }
126 /** Prevent instances of this class from being copied (As this class contains pointers) */
127 GEMMCommandLineOptions(const GEMMCommandLineOptions &) = delete;
128 /** Prevent instances of this class from being copied (As this class contains pointers) */
129 GEMMCommandLineOptions &operator=(const GEMMCommandLineOptions &) = delete;
130 /** Allow instances of this class to be moved */
131 GEMMCommandLineOptions(GEMMCommandLineOptions &&) noexcept(true) = default;
132 /** Allow instances of this class to be moved */
133 GEMMCommandLineOptions &operator=(GEMMCommandLineOptions &&) noexcept(true) = default;
134 /** Default destructor */
135 ~GEMMCommandLineOptions() = default;
136
137public:
138 ToggleOption *help;
139 ToggleOption *add_bias;
140 SimpleOption<int> *M;
141 SimpleOption<int> *N;
142 SimpleOption<int> *K;
143 SimpleOption<int> *B;
144 SimpleOption<float> *alpha;
145 SimpleOption<float> *beta;
146 SimpleOption<int> *offset_src0;
147 SimpleOption<int> *offset_src1;
148 SimpleOption<int> *offset_dst;
149 SimpleOption<float> *scale_src0;
150 SimpleOption<float> *scale_src1;
151 SimpleOption<float> *scale_dst;
152 EnumOption<arm_compute::DataType> *data_type;
153};
154} // namespace
155
/** OpenCL GEMM validation example.
 *
 * Runs a (batched) matrix multiplication on the CL backend and validates the
 * device output against the test-suite reference implementation:
 * - F16/F32: CLGEMM computing alpha * src0 * src1 + beta * src2, validated with tolerances.
 * - QASYMM8: CLGEMMLowpMatrixMultiplyCore into a S32 accumulator, followed by a
 *   fixed-point requantization output stage (optionally adding a S32 bias), validated exactly.
 */
class CLGEMMValidateExample : public ValidateExample
{
public:
    bool do_setup(int argc, char **argv) override
    {
        // Initialize the default CL scheduler/context before configuring any CL function
        CLScheduler::get().default_init();

        // Parse options
        CommandLineParser parser;
        GEMMCommandLineOptions gemm_options(parser);
        parser.parse(argc, argv);

        // Print help
        const bool print_help = gemm_options.help->is_set() ? gemm_options.help->value() : false;
        if(print_help)
        {
            parser.print_help(argv[0]);
            return false; // returning false aborts the example before do_run()/do_validate()
        }

        // Consume parameters
        consume_params(gemm_options);
        print_parameters_internal();

        const bool is_quantized = is_data_type_quantized(data_type);

        // Calculate re-quantization parameters: decompose the effective scale
        // (scale_src0 * scale_src1 / scale_dst) into a fixed-point multiplier and shift
        // used by the GEMMLowp output stage
        if(is_quantized)
        {
            float multiplier = scale_src0 * scale_src1 / scale_dst;
            quantization::calculate_quantized_multiplier(multiplier, &dst_multiplier, &dst_shift);
        }

        // Initialize GEMM inputs/outputs; shapes are (width, height, batches):
        // src0 is MxK, src1 is KxN, src2 is the MxN addend, B batches each
        src0.allocator()->init(TensorInfo(TensorShape(K, M, B), 1, data_type));
        src1.allocator()->init(TensorInfo(TensorShape(N, K, B), 1, data_type));
        src2.allocator()->init(TensorInfo(TensorShape(N, M, B), 1, data_type));
        init_sgemm_output(dst, src0, src1, data_type);

        // Configure function
        if(is_quantized)
        {
            src0.info()->set_quantization_info(QuantizationInfo(scale_src0, offset_src0));
            src1.info()->set_quantization_info(QuantizationInfo(scale_src1, offset_src1));
            dst.info()->set_quantization_info(QuantizationInfo(scale_dst, offset_dst));
            biases.allocator()->init(TensorInfo(TensorShape(N), 1, DataType::S32));
            init_sgemm_output(tmp_dst, src0, src1, DataType::S32);

            // Configure GEMMlowp matrix multiply function (S32 accumulation into tmp_dst)
            mm_gemmlowp.configure(&src0, &src1, nullptr, &tmp_dst);

            // Configure GEMMlowp output stage (requantizes tmp_dst back to QASYMM8 dst)
            mm_gemmlowp_output_stage.configure(&tmp_dst, add_bias ? &biases : nullptr, &dst, dst_multiplier, dst_shift, offset_dst);
            tmp_dst.allocator()->allocate();
            // NOTE(review): biases is allocated and filled even when add_bias is false
            biases.allocator()->allocate();
            fill(CLAccessor(biases), 3);
        }
        else
        {
            // Configure matrix multiply function
            mm_gemm.configure(&src0, &src1, &src2, &dst, alpha, beta);
        }

        // Allocate all the tensors (after configure, so the functions have seen the tensor info)
        src0.allocator()->allocate();
        src1.allocator()->allocate();
        dst.allocator()->allocate();
        src2.allocator()->allocate();

        // Fill inputs with seeded pseudo-random data; do_validate() reuses the same
        // seeds (0/1/2) so the reference tensors get identical contents
        fill(CLAccessor(src0), 0);
        fill(CLAccessor(src1), 1);
        fill(CLAccessor(src2), 2); // NOTE(review): src2 is only consumed by the float path but filled unconditionally

        return true;
    }

    /** Print the run configuration (data type, sizes, and per-type parameters) to stdout. */
    void print_parameters_internal()
    {
        std::cout << "Datatype : " << string_from_data_type(data_type) << "\n";
        std::cout << "M : " << support::cpp11::to_string(M) << "\n";
        std::cout << "N : " << support::cpp11::to_string(N) << "\n";
        std::cout << "K : " << support::cpp11::to_string(K) << "\n";
        std::cout << "B : " << support::cpp11::to_string(B) << "\n";
        // NOTE(review): checks QASYMM8 directly while do_setup() uses is_data_type_quantized();
        // equivalent today since QASYMM8 is the only quantized type this example accepts
        if(data_type == DataType::QASYMM8)
        {
            std::cout << "Scale_Src0 : " << support::cpp11::to_string(scale_src0) << "\n";
            std::cout << "Offset_Src0 : " << support::cpp11::to_string(offset_src0) << "\n";
            // NOTE(review): "Scale_Scr1" label typo preserved (runtime output string)
            std::cout << "Scale_Scr1 : " << support::cpp11::to_string(scale_src1) << "\n";
            std::cout << "Offset_Src1 : " << support::cpp11::to_string(offset_src1) << "\n";
            std::cout << "Scale_Dst : " << support::cpp11::to_string(scale_dst) << "\n";
            std::cout << "Offset_Dst : " << support::cpp11::to_string(offset_dst) << "\n";
            std::cout << "Bias : " << support::cpp11::to_string(add_bias) << "\n";
        }
        else
        {
            std::cout << "Alpha : " << support::cpp11::to_string(alpha) << "\n";
            std::cout << "Beta : " << support::cpp11::to_string(beta) << "\n";
        }
    }

    /** Recompute the GEMM on the host reference implementation and compare against the CL output. */
    void do_validate() override
    {
        switch(data_type)
        {
            case DataType::F16:
            {
                SimpleTensor<half> ref_src0 = { TensorShape(K, M, B), data_type, 1 };
                SimpleTensor<half> ref_src1 = { TensorShape(N, K, B), data_type, 1 };
                SimpleTensor<half> ref_src2 = { TensorShape(N, M, B), data_type, 1 };

                // Same seeds as do_setup() so reference inputs match the device inputs
                fill(ref_src0, 0);
                fill(ref_src1, 1);
                fill(ref_src2, 2);

                SimpleTensor<half> ref_dst = reference::gemm<half>(ref_src0, ref_src1, ref_src2, alpha, beta);
                validate(CLAccessor(dst), ref_dst, tolerance_f16, tolerance_num_f16);
                break;
            }
            case DataType::F32:
            {
                SimpleTensor<float> ref_src0 = { TensorShape(K, M, B), data_type, 1 };
                SimpleTensor<float> ref_src1 = { TensorShape(N, K, B), data_type, 1 };
                SimpleTensor<float> ref_src2 = { TensorShape(N, M, B), data_type, 1 };

                // Same seeds as do_setup() so reference inputs match the device inputs
                fill(ref_src0, 0);
                fill(ref_src1, 1);
                fill(ref_src2, 2);

                SimpleTensor<float> ref_dst = reference::gemm<float>(ref_src0, ref_src1, ref_src2, alpha, beta);
                validate(CLAccessor(dst), ref_dst, tolerance_f32, 0.f, abs_tolerance_f32);
                break;
            }
            case DataType::QASYMM8:
            {
                SimpleTensor<uint8_t> ref_src0{ TensorShape(K, M, B), data_type, 1 };
                SimpleTensor<uint8_t> ref_src1{ TensorShape(N, K, B), data_type, 1 };
                SimpleTensor<uint8_t> ref_dst;

                // Fill reference (same seeds as do_setup())
                fill(ref_src0, 0);
                fill(ref_src1, 1);

                // Reference low-precision core: S32 accumulation with the input offsets applied
                SimpleTensor<int32_t> ref_tmp_dst = reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(ref_src0, ref_src1, TensorShape(N, M, B), offset_src0, offset_src1);

                // The reference output stage takes per-channel vectors; wrap the single
                // per-tensor multiplier/shift computed in do_setup()
                const std::vector<int32_t> dst_multiplier_vec = { dst_multiplier };
                const std::vector<int32_t> dst_shift_vec = { dst_shift };

                if(add_bias)
                {
                    SimpleTensor<int32_t> biases{ TensorShape(N), DataType::S32, 1 };
                    // Fill bias (same seed 3 as the device-side bias tensor)
                    fill(biases, 3);
                    ref_dst = reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, uint8_t>(ref_tmp_dst, biases, dst_multiplier_vec, dst_shift_vec, offset_dst);
                }
                else
                {
                    ref_dst = reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, uint8_t>(ref_tmp_dst, dst_multiplier_vec, dst_shift_vec, offset_dst);
                }
                // Quantized path is validated with the default (exact) tolerance
                validate(CLAccessor(dst), ref_dst);
                break;
            }
            default:
                // Unsupported data types are rejected by the command line parser; nothing to validate
                break;
        }
    }
    /** Execute the configured function(s) on the device and wait for completion. */
    void do_run() override
    {
        // Execute the function
        if(data_type == DataType::QASYMM8)
        {
            // Run gemmlowp
            mm_gemmlowp.run();
            // Run output stage
            mm_gemmlowp_output_stage.run();
        }
        else
        {
            // Run gemm
            mm_gemm.run();
        }

        // Make sure all the OpenCL jobs are done executing:
        CLScheduler::get().sync();
    }

private:
    /** Fill a tensor with seeded pseudo-random values.
     *
     * The same (distribution, seed) pair is used for the device tensor in do_setup()
     * and the reference tensor in do_validate(), so both see identical data.
     *
     * @param[in,out] tensor Tensor or accessor to fill.
     * @param[in]     i      Seed offset selecting a distinct random sequence per tensor.
     */
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::S32:
            case DataType::QASYMM8:
            {
                // NOTE(review): the [-6000, 6000] range is clamped to the element type by the
                // library fill for QASYMM8 — confirm against AssetsLibrary::fill
                std::uniform_int_distribution<> distribution(-6000, 6000);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }

    /** Copy parsed option values into the members used by setup/run/validate.
     *
     * Aborts (ARM_COMPUTE_ERROR_ON) when any matrix dimension is non-positive.
     *
     * @param[in] opts Parsed command line options.
     */
    void consume_params(const GEMMCommandLineOptions &opts)
    {
        ARM_COMPUTE_ERROR_ON(opts.M->value() <= 0);
        ARM_COMPUTE_ERROR_ON(opts.N->value() <= 0);
        ARM_COMPUTE_ERROR_ON(opts.K->value() <= 0);
        ARM_COMPUTE_ERROR_ON(opts.B->value() <= 0);
        M = opts.M->value();
        N = opts.N->value();
        K = opts.K->value();
        B = opts.B->value();
        alpha = opts.alpha->value();
        beta = opts.beta->value();
        offset_src0 = opts.offset_src0->value();
        offset_src1 = opts.offset_src1->value();
        offset_dst = opts.offset_dst->value();
        scale_src0 = opts.scale_src0->value();
        scale_src1 = opts.scale_src1->value();
        scale_dst = opts.scale_dst->value();
        add_bias = opts.add_bias->is_set() ? opts.add_bias->value() : true; // bias defaults to enabled when the flag is absent
        data_type = opts.data_type->value();
    }

    // Device tensors: GEMM operands, output, S32 accumulator and bias (quantized path only)
    CLTensor src0{}, src1{}, src2{}, dst{};
    CLTensor tmp_dst{}, biases{};

    // CL functions: float GEMM, and the two-stage quantized pipeline
    CLGEMM mm_gemm{};
    CLGEMMLowpMatrixMultiplyCore mm_gemmlowp{};
    CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint mm_gemmlowp_output_stage{};

    // Run configuration; defaults mirror GEMMCommandLineOptions' option defaults
    size_t M{ 7 }, N{ 3 }, K{ 5 }, B{ 1 };
    DataType data_type{ DataType::F32 };
    float alpha{ 1.0 }, beta{ 0.0 };
    int offset_src0{ 10 }, offset_src1{ 10 }, offset_dst{ 10 };
    float scale_src0{ 1.0f / 255 }, scale_src1{ 1.0f / 255 }, scale_dst{ 1.0f / 255 };
    int32_t dst_multiplier{ 0 }, dst_shift{ 0 };
    bool add_bias{ true };
};
408
/** Main program for the CL GEMM validate example
 *
 * @param[in] argc Number of arguments
 * @param[in] argv Arguments
 *
 * @return Program exit status as reported by utils::run_example
 */
int main(int argc, char **argv)
{
    return utils::run_example<CLGEMMValidateExample>(argc, argv);
}