/*
 * Copyright (c) 2020-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24#include "arm_compute/core/Types.h"
Diana Biteb7f4a952020-02-06 22:12:07 +000025#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +010026#include "arm_compute/core/WindowIterator.h"
Diana Biteb7f4a952020-02-06 22:12:07 +000027#include "arm_compute/runtime/NEON/NEFunctions.h"
28#include "arm_compute/runtime/NEON/NEScheduler.h"
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +010029
Diana Biteb7f4a952020-02-06 22:12:07 +000030#include "support/ToolchainSupport.h"
Manuel Bottiniae58bdf2021-06-17 17:18:45 +010031#include "utils/Utils.h"
Diana Biteb7f4a952020-02-06 22:12:07 +000032
33#include <cstdlib>
34
35using namespace arm_compute;
36using namespace utils;
37
// Find the minimum and maximum value in a float array.
//
// @param size Number of elements in @p data.
// @param data Pointer to the values to scan.
// @param min  Output: smallest value found.
// @param max  Output: largest value found.
//
// A non-positive size (or null data) leaves *min / *max untouched; the
// original code dereferenced data[0] unconditionally, which is undefined
// behaviour for an empty array.
void find_min_max(int size, const float *data, float *min, float *max)
{
    if (size <= 0 || data == nullptr)
    {
        return;
    }
    *min = *max = data[0];
    // Element 0 already seeds both extremes, so start scanning at 1
    for (int i = 1; i < size; i++)
    {
        const float val = data[i];
        *min            = std::min(*min, val);
        *max            = std::max(*max, val);
    }
}
49
50// Return reasonable quantisation parameters to use for an array of floats
51// based on min and max values
52QuantizationInfo choose_quantization_params(float min, float max)
53{
54 // Extend the [min,max] interval to contain 0 so we can represent it exactly
55 min = std::min(min, 0.f);
56 max = std::max(max, 0.f);
57
58 // Set the quantized min and max in float values
59 const float qmin = 0;
60 const float qmax = 255;
61
62 // Determine the scale
63 const float scale = (max - min) / (qmax - qmin);
64
65 // Determine the zero-point; using affine equation val = (qval-zerop) * scale
66 const float zero_point_real = qmin - min / scale;
67
68 // But we need to nudge the zero_point to an integer (exact quantized value)
69 std::uint8_t zero_point_nudged = 0;
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +010070 if (zero_point_real < qmin)
Diana Biteb7f4a952020-02-06 22:12:07 +000071 {
72 zero_point_nudged = qmin;
73 }
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +010074 else if (zero_point_real > qmax)
Diana Biteb7f4a952020-02-06 22:12:07 +000075 {
76 zero_point_nudged = qmax;
77 }
78 else
79 {
80 zero_point_nudged = static_cast<std::uint8_t>(support::cpp11::round(zero_point_real));
81 }
82
83 QuantizationInfo qinfo = QuantizationInfo(scale, zero_point_nudged);
84 return qinfo;
85}
86
87void quantize_values(int size, qasymm8_t *output, float *input, const QuantizationInfo qinfo)
88{
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +010089 for (int i = 0; i < size; i++)
Diana Biteb7f4a952020-02-06 22:12:07 +000090 {
91 output[i] = quantize_qasymm8(input[i], qinfo);
92 }
93 std::cout << "\n";
94}
95
96int main(int argc, char **argv)
97{
98 Tensor src1;
99 Tensor src2;
100 Tensor dst0;
101 Tensor q_src1;
102 Tensor q_src2;
103 Tensor q_dst0;
104 Tensor q_res;
105 Tensor q_res_output;
Manuel Bottiniae58bdf2021-06-17 17:18:45 +0100106 size_t M = 4;
107 size_t N = 4;
108 size_t K = 4;
109 bool default_input = true;
Diana Biteb7f4a952020-02-06 22:12:07 +0000110
111 // Parse args
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100112 if (argc < 3) /* case default matrix sizes */
Diana Biteb7f4a952020-02-06 22:12:07 +0000113 {
114 // Print help
115 std::cout << "Usage: ./build/neon_gemm_qasymm8 M N K\n";
116 std::cout << "Too few or no inputs provided. Using default M=4, N=4, K=4\n\n";
117 }
118 else /* case M N K arguments provided */
119 {
120 M = strtol(argv[1], nullptr, 10);
121 N = strtol(argv[2], nullptr, 10);
122 K = strtol(argv[3], nullptr, 10);
123 default_input = false;
124 }
125
126 /*** Floating point matrix multiplication ***/
127
128 // Initialise input matrices
129 NEGEMM fgemm{};
130
131 src1.allocator()->init(TensorInfo(TensorShape(K, M), 1, DataType::F32));
132 src2.allocator()->init(TensorInfo(TensorShape(N, K), 1, DataType::F32));
133 dst0.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::F32));
134 fgemm.configure(&src1, &src2, nullptr, &dst0, 1, 0);
135
136 // Allocate matrices
137 src1.allocator()->allocate();
138 src2.allocator()->allocate();
139 dst0.allocator()->allocate();
140
141 // Fill in tensors, by default fill in with known data - for easy testing
142 auto *src1_ptr = reinterpret_cast<float *>(src1.buffer());
143 auto *src2_ptr = reinterpret_cast<float *>(src2.buffer());
144 auto *dst0_ptr = reinterpret_cast<float *>(dst0.buffer());
145
146 // Fill in: one is the identity matrix, other is sequential values
147 // src1: Identity matrix
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100148 for (size_t i = 0; i < M * K; i++)
Manuel Bottiniae58bdf2021-06-17 17:18:45 +0100149 {
Diana Biteb7f4a952020-02-06 22:12:07 +0000150 src1_ptr[i] = 0;
151 }
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100152 for (size_t i = 0; i < M; i++)
Manuel Bottiniae58bdf2021-06-17 17:18:45 +0100153 {
Diana Biteb7f4a952020-02-06 22:12:07 +0000154 src1_ptr[i * K + i] = 1.0f;
155 }
156
157 // src2: Sequential values matrix
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100158 for (size_t i = 0; i < K * N; i++)
Manuel Bottiniae58bdf2021-06-17 17:18:45 +0100159 {
Diana Biteb7f4a952020-02-06 22:12:07 +0000160 src2_ptr[i] = i * 1.123f;
161 }
162
163 // Otherwise if M, N, K is given, fill in with random values
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100164 if (!default_input)
Diana Biteb7f4a952020-02-06 22:12:07 +0000165 {
166 fill_random_tensor(src1, 0.f, 1.f);
167 fill_random_tensor(src2, 0.f, 1.f);
168 }
169
170 // Run single precision gemm and print result
171 fgemm.run();
172
173#if ARM_COMPUTE_DEBUG_ENABLED
174 std::cout << "Result matrix:\n";
175 src1.print(std::cout);
176 src2.print(std::cout);
177 dst0.print(std::cout);
178#endif // ARM_COMPUTE_DEBUG_ENABLED
179
180 /*** Quantised asymmetric 8bit matrix multiplication ***/
181
182 // Start by finding the quantisation parameters for each set of values
183 float src1_min;
184 float src1_max;
185 float src2_min;
186 float src2_max;
187 float dst0_min;
188 float dst0_max;
189
190 find_min_max(M * K, src1_ptr, &src1_min, &src1_max);
191 find_min_max(K * N, src2_ptr, &src2_min, &src2_max);
192 find_min_max(M * N, dst0_ptr, &dst0_min, &dst0_max);
193
194 const QuantizationInfo src1_qinfo = choose_quantization_params(src1_min, src1_max);
195 const QuantizationInfo src2_qinfo = choose_quantization_params(src2_min, src2_max);
196 const QuantizationInfo dst0_qinfo = choose_quantization_params(dst0_min, dst0_max);
197
198 std::cout << "Matrix 1: min=" << src1_min << ", max=" << src1_max << ", ";
199 std::cout << "QuantisationInfo(" << src1_qinfo.scale()[0] << ", " << src1_qinfo.offset()[0] << ")\n";
200 std::cout << "Matrix 2: min=" << src2_min << ", max=" << src2_max << ", ";
201 std::cout << "QuantisationInfo(" << src2_qinfo.scale()[0] << ", " << src2_qinfo.offset()[0] << ")\n";
202 std::cout << "Result : min=" << dst0_min << ", max=" << dst0_max << ", ";
203 std::cout << "QuantisationInfo(" << dst0_qinfo.scale()[0] << ", " << dst0_qinfo.offset()[0] << ")\n";
204
205 // We now have the quantisation info and can configure the quantised tensors
206 q_src1.allocator()->init(TensorInfo(TensorShape(K, M), 1, DataType::QASYMM8, src1_qinfo));
207 q_src2.allocator()->init(TensorInfo(TensorShape(N, K), 1, DataType::QASYMM8, src2_qinfo));
208 q_dst0.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::QASYMM8, dst0_qinfo));
209
210 // In this approach we use the QuantizationLayer construct to perform quantization
211 NEQuantizationLayer q1;
212 NEQuantizationLayer q2;
213 NEQuantizationLayer q3;
214 q1.configure(&src1, &q_src1);
215 q2.configure(&src2, &q_src2);
216 q3.configure(&dst0, &q_dst0);
217
218 // Configure low precision gemm and initialise result tensor (pre-output)
219 NEGEMMLowpMatrixMultiplyCore qgemm;
220 q_res.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::S32));
221 qgemm.configure(&q_src1, &q_src2, nullptr, &q_res);
222
223 // Configure output stage after computing shift and multiplier parameters
Manuel Bottiniae58bdf2021-06-17 17:18:45 +0100224 NEGEMMLowpOutputStage gemmlowp_output_stage;
225 int output_multiplier;
226 int output_shift;
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100227 float multiplier = (src1_qinfo.uniform().scale * src2_qinfo.uniform().scale) / dst0_qinfo.uniform().scale;
Diana Biteb7f4a952020-02-06 22:12:07 +0000228 quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
229 std::cout << "(q_multiplier, q_shift) = (" << output_multiplier << ", " << output_shift << ")\n\n";
Manuel Bottiniae58bdf2021-06-17 17:18:45 +0100230
231 GEMMLowpOutputStageInfo info;
232 info.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
233 info.gemmlowp_multiplier = output_multiplier;
234 info.gemmlowp_shift = output_shift;
235 info.gemmlowp_offset = dst0_qinfo.uniform().offset;
236 info.output_data_type = DataType::QASYMM8;
237 q_res_output.info()->set_data_type(DataType::QASYMM8);
238 q_res_output.info()->set_num_channels(1);
239 gemmlowp_output_stage.configure(&q_res, nullptr, &q_res_output, info);
Diana Biteb7f4a952020-02-06 22:12:07 +0000240
241 // Allocate all tensors
242 q_src1.allocator()->allocate();
243 q_src2.allocator()->allocate();
244 q_dst0.allocator()->allocate();
245 q_res.allocator()->allocate();
246 q_res_output.allocator()->allocate();
247
248 // Run quantization layers (quantizes values of each tensor)
249 q1.run();
250 q2.run();
251 q3.run();
252 // Run low precision matrix multiply kernel
253 qgemm.run();
254 // Run output stage kernel
255 gemmlowp_output_stage.run();
Gunes Bayir3841f4c2021-09-10 16:28:57 +0100256 std::cout << "\nTest Passed\n";
Diana Biteb7f4a952020-02-06 22:12:07 +0000257
258#if ARM_COMPUTE_DEBUG_ENABLED
259 // Print quantized source matrices
260 q_src1.print(std::cout);
261 q_src2.print(std::cout);
262 // Print result matrix in int32 form - before output stage processing
263 std::cout << "Lowp GEMM output (int32):\n";
264 q_res.print(std::cout);
265 // Print QASYMM8 (quantized) matrix
266 std::cout << "Output pipeline result matrix:\n";
267 q_res_output.print(std::cout);
268
269 // Expected result
270 std::cout << "Expected result:\n";
271 q_dst0.print(std::cout);
272#endif // ARM_COMPUTE_DEBUG_ENABLED
273}