blob: 5466d47f48b71007295f51a218bf7f09ce748f04 [file] [log] [blame]
//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

6#include "QuantizationTestHelper.hpp"
7
8#include <armnn_delegate.hpp>
9
10#include <flatbuffers/flatbuffers.h>
11#include <tensorflow/lite/schema/schema_generated.h>
12
13#include <doctest/doctest.h>
14
15namespace armnnDelegate
16{
17
18// Dequantize operator test functions.
19void DequantizeUint8Test(std::vector<armnn::BackendId>& backends)
20{
21 std::vector<int32_t> inputShape { 2, 4 };
22 std::vector<int32_t> outputShape { 2, 4 };
23
24 // Set input and output data
25 std::vector<uint8_t> inputValues
26 {
27 0, 1, 2, 3, // Lower bounds
28 252, 253, 254, 255 // Upper bounds
29 };
30 std::vector<float> expectedOutputValues
31 {
32 0.f, 1.f, 2.f, 3.f,
33 252.f, 253.f, 254.f, 255.f
34 };
35
36 QuantizationTest<uint8_t, float>(tflite::BuiltinOperator_DEQUANTIZE,
37 ::tflite::TensorType_UINT8,
38 ::tflite::TensorType_FLOAT32,
39 backends,
40 inputShape,
41 outputShape,
42 inputValues,
43 expectedOutputValues);
44}
45
46void DequantizeInt8Test(std::vector<armnn::BackendId>& backends)
47{
48 std::vector<int32_t> inputShape { 2, 4 };
49 std::vector<int32_t> outputShape { 2, 4 };
50
51 std::vector<int8_t> inputValues
52 {
53 -1, 0, 1, 2,
54 -128, -127, 126, 127
55 };
56 std::vector<float> expectedOutputValues
57 {
58 -1.f, 0.f, 1.f, 2.f,
59 -128.f, -127.f, 126.f, 127.f
60 };
61
62 QuantizationTest<int8_t , float>(tflite::BuiltinOperator_DEQUANTIZE,
63 ::tflite::TensorType_INT8,
64 ::tflite::TensorType_FLOAT32,
65 backends,
66 inputShape,
67 outputShape,
68 inputValues,
69 expectedOutputValues);
70}
71
72void DequantizeInt16Test(std::vector<armnn::BackendId>& backends)
73{
74 std::vector<int32_t> inputShape { 2, 5 };
75 std::vector<int32_t> outputShape { 2, 5 };
76
77 std::vector<int16_t> inputValues
78 {
79 -1, 0, 1, 2,
80 -32768, -16384, 16384, 32767
81 };
82 std::vector<float> expectedOutputValues
83 {
84 -1.f, 0.f, 1.f, 2.f,
85 -32768.f, -16384.f, 16384.f, 32767.f
86 };
87
88 QuantizationTest<int16_t, float>(tflite::BuiltinOperator_DEQUANTIZE,
89 ::tflite::TensorType_INT16,
90 ::tflite::TensorType_FLOAT32,
91 backends,
92 inputShape,
93 outputShape,
94 inputValues,
95 expectedOutputValues);
96}
97
98// Quantize operator test functions.
99void QuantizeFloat32Uint8Test(std::vector<armnn::BackendId>& backends)
100{
101 std::vector<int32_t> inputShape { 2, 4 };
102 std::vector<int32_t> outputShape { 2, 4 };
103
104 // Set input and output data
105 std::vector<float> inputValues
106 {
107 -1.f, 0.f, 1.f, 2.f, // Lower bounds
108 252.f, 253.f, 255.f, 256.f // Upper bounds
109 };
110 std::vector<uint8_t> expectedOutputValues
111 {
112 0, 0, 1, 2,
113 252, 253, 255, 255
114 };
115
116 QuantizationTest<float, uint8_t>(tflite::BuiltinOperator_QUANTIZE,
117 ::tflite::TensorType_FLOAT32,
118 ::tflite::TensorType_UINT8,
119 backends,
120 inputShape,
121 outputShape,
122 inputValues,
123 expectedOutputValues);
124}
125
126void QuantizeFloat32Int8Test(std::vector<armnn::BackendId>& backends)
127{
128 std::vector<int32_t> inputShape { 2, 4 };
129 std::vector<int32_t> outputShape { 2, 4 };
130
131 std::vector<float> inputValues
132 {
133 -1.f, 0.f, 1.f, 2.f,
134 -128.5f, -127.f, 126.f, 127.5f
135 };
136 std::vector<int8_t> expectedOutputValues
137 {
138 -1, 0, 1, 2,
139 -128, -127, 126, 127
140 };
141
142 QuantizationTest<float, int8_t>(tflite::BuiltinOperator_QUANTIZE,
143 ::tflite::TensorType_FLOAT32,
144 ::tflite::TensorType_INT8,
145 backends,
146 inputShape,
147 outputShape,
148 inputValues,
149 expectedOutputValues);
150}
151
152void QuantizeFloat32Int16Test(std::vector<armnn::BackendId>& backends)
153{
154 std::vector<int32_t> inputShape { 2, 4 };
155 std::vector<int32_t> outputShape { 2, 4 };
156
157 std::vector<float> inputValues
158 {
159 -1.f, 0.f, 1.f, 2.f,
160 -32768.5f, -16384.f, 16384.f, 32767.5f
161 };
162 std::vector<int16_t> expectedOutputValues
163 {
164 -1, 0, 1, 2,
165 -32768, -16384, 16384, 32767
166 };
167
168 QuantizationTest<float, int16_t>(tflite::BuiltinOperator_QUANTIZE,
169 ::tflite::TensorType_FLOAT32,
170 ::tflite::TensorType_INT16,
171 backends,
172 inputShape,
173 outputShape,
174 inputValues,
175 expectedOutputValues);
176}
177
178void QuantizeInt16Int16Test(std::vector<armnn::BackendId>& backends)
179{
180 std::vector<int32_t> inputShape { 2, 4 };
181 std::vector<int32_t> outputShape { 2, 4 };
182
183 std::vector<int16_t> inputValues
184 {
185 -1, 0, 1, 2,
186 -32768, -16384, 16384, 32767
187 };
188 std::vector<int16_t> expectedOutputValues
189 {
190 -1, 0, 1, 2,
191 -32768, -16384, 16384, 32767
192 };
193
194 QuantizationTest<int16_t, int16_t>(tflite::BuiltinOperator_QUANTIZE,
195 ::tflite::TensorType_INT16,
196 ::tflite::TensorType_INT16,
197 backends,
198 inputShape,
199 outputShape,
200 inputValues,
201 expectedOutputValues);
202}
203
204void QuantizeInt16Int8Test(std::vector<armnn::BackendId>& backends)
205{
206 std::vector<int32_t> inputShape { 2, 4 };
207 std::vector<int32_t> outputShape { 2, 4 };
208
209 std::vector<int16_t> inputValues
210 {
211 -1, 0, 1, 2,
212 -32768, -16384, 16384, 32767
213 };
214 std::vector<int8_t> expectedOutputValues
215 {
216 -1, 0, 1, 2,
217 -128, -128, 127, 127
218 };
219
220 QuantizationTest<int16_t, int8_t>(tflite::BuiltinOperator_QUANTIZE,
221 ::tflite::TensorType_INT16,
222 ::tflite::TensorType_INT8,
223 backends,
224 inputShape,
225 outputShape,
226 inputValues,
227 expectedOutputValues);
228}
229
230void QuantizeInt8Uint8Test(std::vector<armnn::BackendId>& backends)
231{
232 std::vector<int32_t> inputShape { 2, 4 };
233 std::vector<int32_t> outputShape { 2, 4 };
234
235 std::vector<int8_t> inputValues
236 {
237 -1, 0, 1, 2,
238 -128, -127, 126, 127
239 };
240 std::vector<uint8_t> expectedOutputValues
241 {
242 0, 0, 1, 2,
243 0, 0, 126, 127
244 };
245
246 QuantizationTest<int8_t, uint8_t>(tflite::BuiltinOperator_QUANTIZE,
247 ::tflite::TensorType_INT8,
248 ::tflite::TensorType_UINT8,
249 backends,
250 inputShape,
251 outputShape,
252 inputValues,
253 expectedOutputValues);
254}
255
256void QuantizeUint8Int8Test(std::vector<armnn::BackendId>& backends)
257{
258 std::vector<int32_t> inputShape { 2, 4 };
259 std::vector<int32_t> outputShape { 2, 4 };
260
261 std::vector<uint8_t> inputValues
262 {
263 0, 1, 2, 3,
264 126, 127, 254, 255
265 };
266 std::vector<int8_t> expectedOutputValues
267 {
268 0, 1, 2, 3,
269 126, 127, 127, 127
270 };
271
272 QuantizationTest<uint8_t, int8_t>(tflite::BuiltinOperator_QUANTIZE,
273 ::tflite::TensorType_UINT8,
274 ::tflite::TensorType_INT8,
275 backends,
276 inputShape,
277 outputShape,
278 inputValues,
279 expectedOutputValues);
280}
281
282TEST_SUITE("QuantizationTests")
283{
284
285// Dequantize Operator Tests
286TEST_CASE ("DEQUANTIZE_UINT8_GpuAcc_Test")
287{
288 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
289 armnn::Compute::CpuRef };
290 DequantizeUint8Test(backends);
291}
292
293TEST_CASE ("DEQUANTIZE_UINT8_CpuAcc_Test")
294{
295 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
296 armnn::Compute::CpuRef };
297 DequantizeUint8Test(backends);
298}
299
300TEST_CASE ("DEQUANTIZE_INT8_GpuAcc_Test")
301{
302 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
303 armnn::Compute::CpuRef };
304 DequantizeInt8Test(backends);
305}
306
307TEST_CASE ("DEQUANTIZE_INT8_CpuAcc_Test")
308{
309 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
310 armnn::Compute::CpuRef };
311 DequantizeInt8Test(backends);
312}
313
314TEST_CASE ("DEQUANTIZE_INT16_GpuAcc_Test")
315{
316 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
317 armnn::Compute::CpuRef };
318 DequantizeInt16Test(backends);
319}
320
321TEST_CASE ("DEQUANTIZE_INT16_CpuAcc_Test")
322{
323 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
324 armnn::Compute::CpuRef };
325 DequantizeInt16Test(backends);
326}
327
328// Quantize Operator Tests
329TEST_CASE ("QUANTIZE_FLOAT32_UINT8_GpuAcc_Test")
330{
331 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
332 armnn::Compute::CpuRef };
333 QuantizeFloat32Uint8Test(backends);
334}
335
336TEST_CASE ("QUANTIZE_FLOAT32_UINT8_CpuAcc_Test")
337{
338 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
339 armnn::Compute::CpuRef };
340 QuantizeFloat32Uint8Test(backends);
341}
342
343TEST_CASE ("QUANTIZE_FLOAT32_INT8_GpuAcc_Test")
344{
345 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
346 armnn::Compute::CpuRef };
347 QuantizeFloat32Int8Test(backends);
348}
349
350TEST_CASE ("QUANTIZE_FLOAT32_INT8_CpuAcc_Test")
351{
352 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
353 armnn::Compute::CpuRef };
354 QuantizeFloat32Int8Test(backends);
355}
356
357TEST_CASE ("QUANTIZE_FLOAT32_INT16_GpuAcc_Test")
358{
359 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
360 armnn::Compute::CpuRef };
361 QuantizeFloat32Int16Test(backends);
362}
363
364TEST_CASE ("QUANTIZE_FLOAT32_INT16_CpuAcc_Test")
365{
366 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
367 armnn::Compute::CpuRef };
368 QuantizeFloat32Int16Test(backends);
369}
370
371TEST_CASE ("QUANTIZE_INT16_INT16_GpuAcc_Test")
372{
373 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
374 armnn::Compute::CpuRef };
375 QuantizeInt16Int16Test(backends);
376}
377
378TEST_CASE ("QUANTIZE_INT16_INT16_CpuAcc_Test")
379{
380 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
381 armnn::Compute::CpuRef };
382 QuantizeInt16Int16Test(backends);
383}
384
385TEST_CASE ("QUANTIZE_INT16_INT8_GpuAcc_Test")
386{
387 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
388 armnn::Compute::CpuRef };
389 QuantizeInt16Int8Test(backends);
390}
391
392TEST_CASE ("QUANTIZE_INT16_INT8_CpuAcc_Test")
393{
394 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
395 armnn::Compute::CpuRef };
396 QuantizeInt16Int8Test(backends);
397}
398
399TEST_CASE ("QUANTIZE_INT8_UINT8_GpuAcc_Test")
400{
401 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
402 armnn::Compute::CpuRef };
403 QuantizeInt8Uint8Test(backends);
404}
405
406TEST_CASE ("QUANTIZE_INT8_UINT8_CpuAcc_Test")
407{
408 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
409 armnn::Compute::CpuRef };
410 QuantizeInt8Uint8Test(backends);
411}
412
413TEST_CASE ("QUANTIZE_UINT8_INT8_GpuAcc_Test")
414{
415 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
416 armnn::Compute::CpuRef };
417 QuantizeUint8Int8Test(backends);
418}
419
420TEST_CASE ("QUANTIZE_UINT8_INT8_CpuAcc_Test")
421{
422 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
423 armnn::Compute::CpuRef };
424 QuantizeUint8Int8Test(backends);
425}
426
427}
428
429} // namespace armnnDelegate