blob: 0210602eb8f9fd995f96350a3a8f60afb8cfdaec [file] [log] [blame]
//
// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
6#include "QuantizationTestHelper.hpp"
7
8#include <armnn_delegate.hpp>
9
10#include <flatbuffers/flatbuffers.h>
Teresa Charlinad1b3d72023-03-14 12:10:28 +000011#include <schema_generated.h>
Matthew Sloyan0d35a932020-11-09 12:25:05 +000012
13#include <doctest/doctest.h>
14
15namespace armnnDelegate
16{
17
18// Dequantize operator test functions.
19void DequantizeUint8Test(std::vector<armnn::BackendId>& backends)
20{
21 std::vector<int32_t> inputShape { 2, 4 };
22 std::vector<int32_t> outputShape { 2, 4 };
23
24 // Set input and output data
25 std::vector<uint8_t> inputValues
26 {
27 0, 1, 2, 3, // Lower bounds
28 252, 253, 254, 255 // Upper bounds
29 };
30 std::vector<float> expectedOutputValues
31 {
32 0.f, 1.f, 2.f, 3.f,
33 252.f, 253.f, 254.f, 255.f
34 };
35
36 QuantizationTest<uint8_t, float>(tflite::BuiltinOperator_DEQUANTIZE,
37 ::tflite::TensorType_UINT8,
38 ::tflite::TensorType_FLOAT32,
39 backends,
40 inputShape,
41 outputShape,
42 inputValues,
43 expectedOutputValues);
44}
45
46void DequantizeInt8Test(std::vector<armnn::BackendId>& backends)
47{
48 std::vector<int32_t> inputShape { 2, 4 };
49 std::vector<int32_t> outputShape { 2, 4 };
50
51 std::vector<int8_t> inputValues
52 {
53 -1, 0, 1, 2,
54 -128, -127, 126, 127
55 };
56 std::vector<float> expectedOutputValues
57 {
58 -1.f, 0.f, 1.f, 2.f,
59 -128.f, -127.f, 126.f, 127.f
60 };
61
62 QuantizationTest<int8_t , float>(tflite::BuiltinOperator_DEQUANTIZE,
63 ::tflite::TensorType_INT8,
64 ::tflite::TensorType_FLOAT32,
65 backends,
66 inputShape,
67 outputShape,
68 inputValues,
69 expectedOutputValues);
70}
71
72void DequantizeInt16Test(std::vector<armnn::BackendId>& backends)
73{
74 std::vector<int32_t> inputShape { 2, 5 };
75 std::vector<int32_t> outputShape { 2, 5 };
76
77 std::vector<int16_t> inputValues
78 {
79 -1, 0, 1, 2,
80 -32768, -16384, 16384, 32767
81 };
82 std::vector<float> expectedOutputValues
83 {
84 -1.f, 0.f, 1.f, 2.f,
85 -32768.f, -16384.f, 16384.f, 32767.f
86 };
87
88 QuantizationTest<int16_t, float>(tflite::BuiltinOperator_DEQUANTIZE,
89 ::tflite::TensorType_INT16,
90 ::tflite::TensorType_FLOAT32,
91 backends,
92 inputShape,
93 outputShape,
94 inputValues,
95 expectedOutputValues);
96}
97
98// Quantize operator test functions.
99void QuantizeFloat32Uint8Test(std::vector<armnn::BackendId>& backends)
100{
101 std::vector<int32_t> inputShape { 2, 4 };
102 std::vector<int32_t> outputShape { 2, 4 };
103
104 // Set input and output data
105 std::vector<float> inputValues
106 {
107 -1.f, 0.f, 1.f, 2.f, // Lower bounds
108 252.f, 253.f, 255.f, 256.f // Upper bounds
109 };
110 std::vector<uint8_t> expectedOutputValues
111 {
112 0, 0, 1, 2,
113 252, 253, 255, 255
114 };
115
116 QuantizationTest<float, uint8_t>(tflite::BuiltinOperator_QUANTIZE,
117 ::tflite::TensorType_FLOAT32,
118 ::tflite::TensorType_UINT8,
119 backends,
120 inputShape,
121 outputShape,
122 inputValues,
123 expectedOutputValues);
124}
125
126void QuantizeFloat32Int8Test(std::vector<armnn::BackendId>& backends)
127{
128 std::vector<int32_t> inputShape { 2, 4 };
129 std::vector<int32_t> outputShape { 2, 4 };
130
131 std::vector<float> inputValues
132 {
133 -1.f, 0.f, 1.f, 2.f,
134 -128.5f, -127.f, 126.f, 127.5f
135 };
136 std::vector<int8_t> expectedOutputValues
137 {
138 -1, 0, 1, 2,
139 -128, -127, 126, 127
140 };
141
142 QuantizationTest<float, int8_t>(tflite::BuiltinOperator_QUANTIZE,
143 ::tflite::TensorType_FLOAT32,
144 ::tflite::TensorType_INT8,
145 backends,
146 inputShape,
147 outputShape,
148 inputValues,
149 expectedOutputValues);
150}
151
152void QuantizeFloat32Int16Test(std::vector<armnn::BackendId>& backends)
153{
154 std::vector<int32_t> inputShape { 2, 4 };
155 std::vector<int32_t> outputShape { 2, 4 };
156
157 std::vector<float> inputValues
158 {
159 -1.f, 0.f, 1.f, 2.f,
160 -32768.5f, -16384.f, 16384.f, 32767.5f
161 };
162 std::vector<int16_t> expectedOutputValues
163 {
164 -1, 0, 1, 2,
165 -32768, -16384, 16384, 32767
166 };
167
168 QuantizationTest<float, int16_t>(tflite::BuiltinOperator_QUANTIZE,
169 ::tflite::TensorType_FLOAT32,
170 ::tflite::TensorType_INT16,
171 backends,
172 inputShape,
173 outputShape,
174 inputValues,
175 expectedOutputValues);
176}
177
178void QuantizeInt16Int16Test(std::vector<armnn::BackendId>& backends)
179{
180 std::vector<int32_t> inputShape { 2, 4 };
181 std::vector<int32_t> outputShape { 2, 4 };
182
183 std::vector<int16_t> inputValues
184 {
185 -1, 0, 1, 2,
186 -32768, -16384, 16384, 32767
187 };
188 std::vector<int16_t> expectedOutputValues
189 {
190 -1, 0, 1, 2,
191 -32768, -16384, 16384, 32767
192 };
193
194 QuantizationTest<int16_t, int16_t>(tflite::BuiltinOperator_QUANTIZE,
195 ::tflite::TensorType_INT16,
196 ::tflite::TensorType_INT16,
197 backends,
198 inputShape,
199 outputShape,
200 inputValues,
201 expectedOutputValues);
202}
203
204void QuantizeInt16Int8Test(std::vector<armnn::BackendId>& backends)
205{
206 std::vector<int32_t> inputShape { 2, 4 };
207 std::vector<int32_t> outputShape { 2, 4 };
208
209 std::vector<int16_t> inputValues
210 {
211 -1, 0, 1, 2,
212 -32768, -16384, 16384, 32767
213 };
214 std::vector<int8_t> expectedOutputValues
215 {
216 -1, 0, 1, 2,
217 -128, -128, 127, 127
218 };
219
220 QuantizationTest<int16_t, int8_t>(tflite::BuiltinOperator_QUANTIZE,
221 ::tflite::TensorType_INT16,
222 ::tflite::TensorType_INT8,
223 backends,
224 inputShape,
225 outputShape,
226 inputValues,
227 expectedOutputValues);
228}
229
230void QuantizeInt8Uint8Test(std::vector<armnn::BackendId>& backends)
231{
232 std::vector<int32_t> inputShape { 2, 4 };
233 std::vector<int32_t> outputShape { 2, 4 };
234
235 std::vector<int8_t> inputValues
236 {
237 -1, 0, 1, 2,
238 -128, -127, 126, 127
239 };
240 std::vector<uint8_t> expectedOutputValues
241 {
242 0, 0, 1, 2,
243 0, 0, 126, 127
244 };
245
246 QuantizationTest<int8_t, uint8_t>(tflite::BuiltinOperator_QUANTIZE,
247 ::tflite::TensorType_INT8,
248 ::tflite::TensorType_UINT8,
249 backends,
250 inputShape,
251 outputShape,
252 inputValues,
253 expectedOutputValues);
254}
255
256void QuantizeUint8Int8Test(std::vector<armnn::BackendId>& backends)
257{
258 std::vector<int32_t> inputShape { 2, 4 };
259 std::vector<int32_t> outputShape { 2, 4 };
260
261 std::vector<uint8_t> inputValues
262 {
263 0, 1, 2, 3,
264 126, 127, 254, 255
265 };
266 std::vector<int8_t> expectedOutputValues
267 {
268 0, 1, 2, 3,
269 126, 127, 127, 127
270 };
271
272 QuantizationTest<uint8_t, int8_t>(tflite::BuiltinOperator_QUANTIZE,
273 ::tflite::TensorType_UINT8,
274 ::tflite::TensorType_INT8,
275 backends,
276 inputShape,
277 outputShape,
278 inputValues,
279 expectedOutputValues);
280}
281
David Monahan63e75dc2020-11-20 15:30:49 +0000282TEST_SUITE("CpuRef_QuantizationTests")
283{
284
285TEST_CASE ("DEQUANTIZE_UINT8_CpuRef_Test")
286{
287 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
288 DequantizeUint8Test(backends);
289}
290
291
292TEST_CASE ("DEQUANTIZE_INT8_CpuRef_Test")
293{
294 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
295 DequantizeInt8Test(backends);
296}
297
298
299TEST_CASE ("DEQUANTIZE_INT16_CpuRef_Test")
300{
301 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
302 DequantizeInt16Test(backends);
303}
304
305
306TEST_CASE ("QUANTIZE_FLOAT32_UINT8_CpuRef_Test")
307{
308 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
309 QuantizeFloat32Uint8Test(backends);
310}
311
312
313TEST_CASE ("QUANTIZE_FLOAT32_INT8_CpuRef_Test")
314{
315 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
316 QuantizeFloat32Int8Test(backends);
317}
318
319
320TEST_CASE ("QUANTIZE_FLOAT32_INT16_CpuRef_Test")
321{
322 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
323 QuantizeFloat32Int16Test(backends);
324}
325
326
327TEST_CASE ("QUANTIZE_INT16_INT16_CpuRef_Test")
328{
329 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
330 QuantizeInt16Int16Test(backends);
331}
332
333
334TEST_CASE ("QUANTIZE_INT16_INT8_CpuRef_Test")
335{
336 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
337 QuantizeInt16Int8Test(backends);
338}
339
340
341
342TEST_CASE ("QUANTIZE_INT8_UINT8_CpuRef_Test")
343{
344 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
345 QuantizeInt8Uint8Test(backends);
346}
347
348
349TEST_CASE ("QUANTIZE_UINT8_INT8_CpuRef_Test")
350{
351 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
352 QuantizeUint8Int8Test(backends);
353}
354
355}
356
357TEST_SUITE("CpuAcc_QuantizationTests")
358{
359
360// Dequantize Operator Tests
361TEST_CASE ("DEQUANTIZE_UINT8_CpuAcc_Test")
362{
363 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
364 DequantizeUint8Test(backends);
365}
366
367TEST_CASE ("DEQUANTIZE_INT8_CpuAcc_Test")
368{
369 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
370 DequantizeInt8Test(backends);
371}
372
373TEST_CASE ("DEQUANTIZE_INT16_CpuAcc_Test")
374{
375 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
376 DequantizeInt16Test(backends);
377}
378
379// Quantize Operator Tests
380TEST_CASE ("QUANTIZE_FLOAT32_UINT8_CpuAcc_Test")
381{
382 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
383 QuantizeFloat32Uint8Test(backends);
384}
385
386TEST_CASE ("QUANTIZE_FLOAT32_INT8_CpuAcc_Test")
387{
388 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
389 QuantizeFloat32Int8Test(backends);
390}
391
392TEST_CASE ("QUANTIZE_INT8_UINT8_CpuAcc_Test")
393{
394 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
395 QuantizeInt8Uint8Test(backends);
396}
397
398TEST_CASE ("QUANTIZE_UINT8_INT8_CpuAcc_Test")
399{
400 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
401 QuantizeUint8Int8Test(backends);
402}
403
404}
405
406TEST_SUITE("GpuAcc_QuantizationTests")
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000407{
408
409// Dequantize Operator Tests
410TEST_CASE ("DEQUANTIZE_UINT8_GpuAcc_Test")
411{
David Monahan63e75dc2020-11-20 15:30:49 +0000412 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000413 DequantizeUint8Test(backends);
414}
415
416TEST_CASE ("DEQUANTIZE_INT8_GpuAcc_Test")
417{
David Monahan63e75dc2020-11-20 15:30:49 +0000418 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000419 DequantizeInt8Test(backends);
420}
421
422TEST_CASE ("DEQUANTIZE_INT16_GpuAcc_Test")
423{
David Monahan63e75dc2020-11-20 15:30:49 +0000424 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000425 DequantizeInt16Test(backends);
426}
427
428// Quantize Operator Tests
429TEST_CASE ("QUANTIZE_FLOAT32_UINT8_GpuAcc_Test")
430{
David Monahan63e75dc2020-11-20 15:30:49 +0000431 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000432 QuantizeFloat32Uint8Test(backends);
433}
434
435TEST_CASE ("QUANTIZE_FLOAT32_INT8_GpuAcc_Test")
436{
David Monahan63e75dc2020-11-20 15:30:49 +0000437 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000438 QuantizeFloat32Int8Test(backends);
439}
440
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000441TEST_CASE ("QUANTIZE_INT8_UINT8_GpuAcc_Test")
442{
David Monahan63e75dc2020-11-20 15:30:49 +0000443 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000444 QuantizeInt8Uint8Test(backends);
445}
446
447TEST_CASE ("QUANTIZE_UINT8_INT8_GpuAcc_Test")
448{
David Monahan63e75dc2020-11-20 15:30:49 +0000449 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000450 QuantizeUint8Int8Test(backends);
451}
452
453}
454
455} // namespace armnnDelegate