/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CL /* Needed by Utils.cpp to handle OpenCL exceptions properly */
#error "This example needs to be built with -DARM_COMPUTE_CL"
#endif /* ARM_COMPUTE_CL */

#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLFunctions.h"
#include "arm_compute/runtime/CL/CLScheduler.h"

#include "tests/AssetsLibrary.h"
#include "tests/CL/CLAccessor.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/SimpleTensor.h"
#include "tests/validation/Validation.h"
#include "tests/validation/reference/GEMM.h"
#include "tests/validation/reference/GEMMLowp.h"

#include "ValidateExample.h"

#include "utils/Utils.h"

#include <cstdlib>
#include <iostream>
#include <random>

using namespace arm_compute;
using namespace utils;
using namespace arm_compute::test;
using namespace arm_compute::test::validation;
constexpr float abs_tolerance_f32(0.0001f);                   /**< F32 Absolute tolerance value for comparing reference's output against implementation's output for
                                                               * floating point data types in case using relative tolerance fails because of small values */
RelativeTolerance<float>            tolerance_f32(0.001f);    /**< F32 Tolerance value for comparing reference's output against implementation's output for floating point data types */
RelativeTolerance<half_float::half> tolerance_f16(half(0.2)); /**< F16 Tolerance value for comparing reference's output against implementation's output for floating point data types */
constexpr float tolerance_num_f16 = 0.02f;                    /**< F16 Maximum allowed ratio of mismatching values when comparing reference's output against implementation's output */

class CLGEMMValidateExample : public ValidateExample
{
public:
    bool do_setup(int argc, char **argv) override
    {
        // TODO(antbar01): Update to use a command line interface?
        CLScheduler::get().default_init();
        if(argc == 2)
        {
            size_t dt = strtol(argv[1], nullptr, 10);
            switch(dt)
            {
                case 1:
                {
                    data_type = DataType::F16;
                    std::cout << "Usage: " << argv[0] << " 1 M N K [alpha = 1.0f] [beta = 0.0f]\n";
                    std::cout << "Using default values: Datatype=FP16 M=7, N=3, K=5, alpha=1.0f and beta=0.0f\n";
                    break;
                }
                case 2:
                {
                    data_type = DataType::QASYMM8;
                    std::cout << "Usage: " << argv[0] << " 2 M N K [scale_src0 = 1.0f/255] [offset_src0 = 10] [scale_src1 = 1.0f/255] [offset_src1 = 10] [scale_dst = 1.0f/255] [offset_dst = 10] [bias = 1]\n";
                    std::cout <<
                        "Using default values: Datatype=QASYMM8 M=7, N=3, K=5, scale_src0 =(1.0f/255), offset_src0 = 10, scale_src1 =(1.0f/255), offset_src1 = 10, scale_dst =(1.0f/255), offset_dst = 10, bias=1\n\n";
                    break;
                }
                case 0:
                default:
                {
                    data_type = DataType::F32;
                    std::cout << "Usage: " << argv[0] << " 0 M N K [alpha = 1.0f] [beta = 0.0f]\n";
                    std::cout << "Using default values: Datatype=FP32 M=7, N=3, K=5, alpha=1.0f and beta=0.0f\n";
                }
            }
        }
        else if(argc < 5)
        {
            // Print help
            std::cout << "Too few or no arguments provided.\n";
            std::cout << "Usage with datatype = FP32    : " << argv[0] << " 0 M N K [alpha = 1.0f] [beta = 0.0f]\n";
            std::cout << "           datatype = FP16    : " << argv[0] << " 1 M N K [alpha = 1.0f] [beta = 0.0f]\n";
            std::cout << "           datatype = QASYMM8 : " << argv[0] << " 2 M N K [scale_src0 = 1.0f/255] [offset_src0 = 10] [scale_src1 = 1.0f/255] [offset_src1 = 10] [scale_dst = 1.0f/255] [offset_dst = 10] [bias = 1]\n";
            std::cout << "Using default values: Datatype=FP32 M=7, N=3, K=5, alpha=1.0f and beta=0.0f\n";
        }
        else
        {
            size_t dt = strtol(argv[1], nullptr, 10);
            switch(dt)
            {
                case 1:
                {
                    data_type = DataType::F16;
                    break;
                }
                case 2:
                {
                    data_type = DataType::QASYMM8;
                    break;
                }
                case 0:
                default:
                    data_type = DataType::F32;
            }
            M = strtol(argv[2], nullptr, 10);
            N = strtol(argv[3], nullptr, 10);
            K = strtol(argv[4], nullptr, 10);
        }

        switch(data_type)
        {
            case DataType::F16:
            case DataType::F32:
            {
                if(argc > 5)
                {
                    alpha = strtof(argv[5], nullptr);
                    if(argc > 6)
                    {
                        beta = strtof(argv[6], nullptr);
                    }
                }
                break;
            }
            case DataType::QASYMM8:
            {
                if(argc > 5)
                {
                    scale_src0 = strtof(argv[5], nullptr);
                    if(argc > 6)
                    {
                        offset_src0 = strtol(argv[6], nullptr, 10);
                        if(argc > 7)
                        {
                            scale_src1 = strtof(argv[7], nullptr);
                            if(argc > 8)
                            {
                                offset_src1 = strtol(argv[8], nullptr, 10);
                                if(argc > 9)
                                {
                                    scale_dst = strtof(argv[9], nullptr);
                                    if(argc > 10)
                                    {
                                        offset_dst = strtol(argv[10], nullptr, 10);
                                        if(argc > 11)
                                        {
                                            add_bias = (strtol(argv[11], nullptr, 10) == 1);
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
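                // The output stage must rescale the S32 accumulators back to QASYMM8.
                // The real-valued rescale factor scale_src0 * scale_src1 / scale_dst
                // (required to be less than one) is encoded as a normalized fixed-point
                // multiplier plus a right shift, so that (approximately)
                // acc * factor == fixedpoint_mul(acc, dst_multiplier) >> dst_shift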
                float multiplier = scale_src0 * scale_src1 / scale_dst;
                quantization::calculate_quantized_multiplier_less_than_one(multiplier, &dst_multiplier, &dst_shift);
                break;
            }
            default:
                break;
        }

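        // TensorShape lists dimensions as (width, height), i.e. (columns, rows):
        // TensorShape(K, M) is the M x K matrix src0, TensorShape(N, K) the K x N
        // matrix src1, and TensorShape(N, M) the M x N matrices src2 and dst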
        src0.allocator()->init(TensorInfo(TensorShape(K, M), 1, data_type));
        src1.allocator()->init(TensorInfo(TensorShape(N, K), 1, data_type));
        src2.allocator()->init(TensorInfo(TensorShape(N, M), 1, data_type));
        init_sgemm_output(dst, src0, src1, data_type);

        // Configure function
        if(data_type == DataType::QASYMM8)
        {
            src0.info()->set_quantization_info(QuantizationInfo(scale_src0, offset_src0));
            src1.info()->set_quantization_info(QuantizationInfo(scale_src1, offset_src1));
            dst.info()->set_quantization_info(QuantizationInfo(scale_dst, offset_dst));
            biases.allocator()->init(TensorInfo(TensorShape(N), 1, DataType::S32));
            init_sgemm_output(tmp_dst, src0, src1, DataType::S32);

            // Configure GEMMlowp matrix multiply function
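            // Computes the matrix product of src0 and src1 into S32 accumulators,
            // taking the quantization offsets carried by the tensors' QuantizationInfo into account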
            mm_gemmlowp.configure(&src0, &src1, &tmp_dst);

            // Configure GEMMlowp output stage
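            // Rescales the S32 accumulators down to QASYMM8, roughly:
            // dst = saturate_uint8((fixedpoint_mul(acc + bias, dst_multiplier) >> dst_shift) + offset_dst)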
            mm_gemmlowp_output_stage.configure(&tmp_dst, add_bias ? &biases : nullptr, &dst, dst_multiplier, dst_shift, offset_dst);
            tmp_dst.allocator()->allocate();
            biases.allocator()->allocate();
            fill(CLAccessor(biases), 3);
        }
        else
        {
            // Configure matrix multiply function
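            // Computes dst = alpha * (src0 * src1) + beta * src2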
            mm_gemm.configure(&src0, &src1, &src2, &dst, alpha, beta);
        }

        // Allocate all the tensors
        src0.allocator()->allocate();
        src1.allocator()->allocate();
        dst.allocator()->allocate();
        src2.allocator()->allocate();

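        // Fill the inputs with reproducible pseudo-random data; the second argument
        // is a seed offset, so each tensor gets distinct but repeatable content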
        fill(CLAccessor(src0), 0);
        fill(CLAccessor(src1), 1);
        fill(CLAccessor(src2), 2);

        return true;
    }

    void print_parameters(framework::Printer &printer) override
    {
        printer.print_entry("Datatype", string_from_data_type(data_type));
        printer.print_entry("M", support::cpp11::to_string(M));
        printer.print_entry("N", support::cpp11::to_string(N));
        printer.print_entry("K", support::cpp11::to_string(K));
        if(data_type == DataType::QASYMM8)
        {
            printer.print_entry("Scale_Src0", support::cpp11::to_string(scale_src0));
            printer.print_entry("Offset_Src0", support::cpp11::to_string(offset_src0));
            printer.print_entry("Scale_Src1", support::cpp11::to_string(scale_src1));
            printer.print_entry("Offset_Src1", support::cpp11::to_string(offset_src1));
            printer.print_entry("Scale_Dst", support::cpp11::to_string(scale_dst));
            printer.print_entry("Offset_Dst", support::cpp11::to_string(offset_dst));
            printer.print_entry("Bias", support::cpp11::to_string(add_bias));
        }
        else
        {
            printer.print_entry("Alpha", support::cpp11::to_string(alpha));
            printer.print_entry("Beta", support::cpp11::to_string(beta));
        }
    }

    void do_validate() override
    {
        switch(data_type)
        {
            case DataType::F16:
            {
                SimpleTensor<half> ref_src0 = { TensorShape(K, M), data_type, 1 };
                SimpleTensor<half> ref_src1 = { TensorShape(N, K), data_type, 1 };
                SimpleTensor<half> ref_src2 = { TensorShape(N, M), data_type, 1 };

                fill(ref_src0, 0);
                fill(ref_src1, 1);
                fill(ref_src2, 2);

                SimpleTensor<half> ref_dst = reference::gemm<half>(ref_src0, ref_src1, ref_src2, alpha, beta);
                validate(CLAccessor(dst), ref_dst, tolerance_f16, tolerance_num_f16);
                break;
            }
            case DataType::F32:
            {
                SimpleTensor<float> ref_src0 = { TensorShape(K, M), data_type, 1 };
                SimpleTensor<float> ref_src1 = { TensorShape(N, K), data_type, 1 };
                SimpleTensor<float> ref_src2 = { TensorShape(N, M), data_type, 1 };

                fill(ref_src0, 0);
                fill(ref_src1, 1);
                fill(ref_src2, 2);

                SimpleTensor<float> ref_dst = reference::gemm<float>(ref_src0, ref_src1, ref_src2, alpha, beta);
                validate(CLAccessor(dst), ref_dst, tolerance_f32, 0.f, abs_tolerance_f32);
                break;
            }
            case DataType::QASYMM8:
            {
                SimpleTensor<uint8_t> ref_src0{ TensorShape(K, M), data_type, 1 };
                SimpleTensor<uint8_t> ref_src1{ TensorShape(N, K), data_type, 1 };
                SimpleTensor<uint8_t> ref_dst;

                // Fill reference
                fill(ref_src0, 0);
                fill(ref_src1, 1);

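                // Mirror the CL pipeline on the host, using the same fill seeds:
                // S32 matrix multiplication followed by the fixed-point quantize-down stage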
                SimpleTensor<int32_t> ref_tmp_dst = reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(ref_src0, ref_src1, offset_src0, offset_src1);

                if(add_bias)
                {
                    SimpleTensor<int32_t> biases{ TensorShape(N), DataType::S32, 1 };
                    // Fill bias
                    fill(biases, 3);
                    ref_dst = reference::gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint<int32_t>(ref_tmp_dst, biases, dst_multiplier, dst_shift, offset_dst);
                }
                else
                {
                    ref_dst = reference::gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint<int32_t>(ref_tmp_dst, dst_multiplier, dst_shift, offset_dst);
                }
                validate(CLAccessor(dst), ref_dst);
                break;
            }
            default:
                break;
        }
    }

    void do_run() override
    {
        // Execute the function
        if(data_type == DataType::QASYMM8)
        {
            // Run gemmlowp
            mm_gemmlowp.run();
            // Run output stage
            mm_gemmlowp_output_stage.run();
        }
        else
        {
            // Run gemm
            mm_gemm.run();
        }

        // Make sure all the OpenCL jobs are done executing:
        CLScheduler::get().sync();
    }

private:
    template <typename U>
    void fill(U &&tensor, int i)
    {
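        // Pick a fill distribution matching the tensor's data type; i is the
        // per-tensor seed offset passed to the assets library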
        switch(tensor.data_type())
        {
            case DataType::F16:
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::S32:
            case DataType::QASYMM8:
            {
                std::uniform_int_distribution<> distribution(-6000, 6000);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }

    CLTensor src0{}, src1{}, src2{}, dst{};
    CLTensor tmp_dst{}, biases{};

    CLGEMM                                              mm_gemm{};
    CLGEMMLowpMatrixMultiplyCore                        mm_gemmlowp{};
    CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint mm_gemmlowp_output_stage{};

    size_t   M{ 7 }, N{ 3 }, K{ 5 };
    DataType data_type{ DataType::F32 };
    float    alpha{ 1.0f }, beta{ 0.0f };
    int      offset_src0{ 10 }, offset_src1{ 10 }, offset_dst{ 10 };
    float    scale_src0{ 1.0f / 255 }, scale_src1{ 1.0f / 255 }, scale_dst{ 1.0f / 255 };
    int32_t  dst_multiplier{ 0 }, dst_shift{ 0 };
    bool     add_bias{ true };
};

/** Main program for the GEMM validate example
 *
 * @param[in] argc Number of arguments
 * @param[in] argv Arguments ( [optional] datatype, [optional] M, [optional] N, [optional] K,
 *                 then for F32/F16: [optional] alpha, [optional] beta;
 *                 for QASYMM8: [optional] scale_src0, [optional] offset_src0, [optional] scale_src1, [optional] offset_src1,
 *                 [optional] scale_dst, [optional] offset_dst, [optional] bias )
 */
int main(int argc, char **argv)
{
    return utils::run_example<CLGEMMValidateExample>(argc, argv);
}