//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <sstream>

BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)

struct SimpleConv2DFixture : public ParserFlatbuffersFixture
{
    explicit SimpleConv2DFixture()
    {
        m_JsonString = R"(
            {
                "version": 3,
                "operator_codes": [ { "builtin_code": "CONV_2D" } ],
                "subgraphs": [ {
                    "tensors": [
                        {
                            "shape": [ 1, 3, 3, 1 ],
                            "type": "UINT8",
                            "buffer": 0,
                            "name": "inputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        },
                        {
                            "shape": [ 1, 1, 1, 1 ],
                            "type": "UINT8",
                            "buffer": 1,
                            "name": "outputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 511.0 ],
                                "scale": [ 2.0 ],
                                "zero_point": [ 0 ],
                            }
                        },
                        {
                            "shape": [ 1, 3, 3, 1 ],
                            "type": "UINT8",
                            "buffer": 2,
                            "name": "filterTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        }
                    ],
                    "inputs": [ 0 ],
                    "outputs": [ 1 ],
                    "operators": [
                        {
                            "opcode_index": 0,
                            "inputs": [ 0, 2 ],
                            "outputs": [ 1 ],
                            "builtin_options_type": "Conv2DOptions",
                            "builtin_options": {
                                "padding": "VALID",
                                "stride_w": 1,
                                "stride_h": 1,
                                "fused_activation_function": "NONE"
                            },
                            "custom_options_format": "FLEXBUFFERS"
                        }
                    ],
                } ],
                "buffers" : [
                    { },
                    { },
                    { "data": [ 2,1,0, 6,2,1, 4,1,2 ], },
                    { },
                ]
            }
        )";
        SetupSingleInputSingleOutput("inputTensor", "outputTensor");
    }
};
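
// A quick note on the arithmetic the expected values below rely on (derived from the
// quantization parameters in the fixture above, not from any external reference):
// with TfLite's affine scheme, real ≈ scale * (quantized - zero_point). Here the input
// and filter both use scale 1.0 and zero point 0 while the output uses scale 2.0, so
// the expected quantized output is simply the raw convolution sum divided by 2; for the
// single VALID-padded position below that is 98 / 2 = 49.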

BOOST_FIXTURE_TEST_CASE( ParseSimpleConv2D, SimpleConv2DFixture )
{
    RunTest<4, armnn::DataType::QAsymmU8>(
        0,
        {
            1, 2, 3,
            4, 5, 6,
            7, 8, 9,
        },
        // because of the output scaling we need to take half of the values
        {
            (1*2 + 2*1 + 3*0 +
             4*6 + 5*2 + 6*1 +
             7*4 + 8*1 + 9*2) /2
        });
}

struct Conv2DWithBiasesFixture : public ParserFlatbuffersFixture
{
    explicit Conv2DWithBiasesFixture(const std::string & inputShape,
                                     const std::string & outputShape,
                                     const std::string & filterShape,
                                     const std::string & filterData,
                                     const std::string & biasShape,
                                     const std::string & biasData,
                                     const std::string & strides,
                                     const std::string & activation="NONE",
                                     const std::string & filterScale="1.0",
                                     const std::string & filterZeroPoint="0",
                                     const std::string & outputScale="2.0",
                                     const std::string & outputZeroPoint="0")
    {
        m_JsonString = R"(
            {
                "version": 3,
                "operator_codes": [ { "builtin_code": "CONV_2D" } ],
                "subgraphs": [ {
                    "tensors": [
                        {
                            "shape": )" + inputShape + R"(,
                            "type": "UINT8",
                            "buffer": 0,
                            "name": "inputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        },
                        {
                            "shape": )" + outputShape + R"(,
                            "type": "UINT8",
                            "buffer": 1,
                            "name": "outputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 511.0 ],
                                "scale": [ )" + outputScale + R"( ],
                                "zero_point": [ )" + outputZeroPoint + R"( ],
                            }
                        },
                        {
                            "shape": )" + filterShape + R"( ,
                            "type": "UINT8",
                            "buffer": 2,
                            "name": "filterTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ )" + filterScale + R"( ],
                                "zero_point": [ )" + filterZeroPoint + R"( ],
                            }
                        },
                        {
                            "shape": )" + biasShape + R"( ,
                            "type": "INT32",
                            "buffer": 3,
                            "name": "biasTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        }
                    ],
                    "inputs": [ 0 ],
                    "outputs": [ 1 ],
                    "operators": [
                        {
                            "opcode_index": 0,
                            "inputs": [ 0, 2, 3 ],
                            "outputs": [ 1 ],
                            "builtin_options_type": "Conv2DOptions",
                            "builtin_options": {
                                "padding": "SAME",
                                "stride_w": )" + strides + R"(,
                                "stride_h": )" + strides + R"(,
                                "fused_activation_function": )" + activation + R"(
                            },
                            "custom_options_format": "FLEXBUFFERS"
                        }
                    ],
                } ],
                "buffers" : [
                    { },
                    { },
                    { "data": )" + filterData + R"(, },
                    { "data": )" + biasData + R"(, },
                ]
            }
        )";
        SetupSingleInputSingleOutput("inputTensor", "outputTensor");
    }
};
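
// The derived fixtures below splice their parameters straight into the JSON above. Note
// that this base fixture always uses SAME padding, applies the same stride to both width
// and height, and (unless overridden) a filter scale of 1.0 with zero point 0 and an
// output scale of 2.0 with zero point 0.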

struct SimpleConv2DWithBiasesFixture : Conv2DWithBiasesFixture
{
    SimpleConv2DWithBiasesFixture()
    : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]",    // inputShape
                              "[ 1, 2, 2, 1 ]",    // outputShape
                              "[ 1, 2, 2, 1 ]",    // filterShape
                              "[ 2,1, 0,6 ]",      // filterData
                              "[ 1 ]",             // biasShape
                              "[ 10, 0, 0, 0 ]",   // biasData
                              "1")                 // stride w and h
    {}
};
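
// The bias buffer holds raw little-endian bytes, so the four bytes "[ 10, 0, 0, 0 ]"
// above encode a single INT32 bias value of 10 (and "[ 16, 0, 0, 0 ]" further down
// encodes 16). The expected outputs below are therefore (convolution sum + bias) / outputScale.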

BOOST_FIXTURE_TEST_CASE( ParseConv2DWithBias, SimpleConv2DWithBiasesFixture )
{
    RunTest<4, armnn::DataType::QAsymmU8>(
        0,
        {
            1, 2,
            3, 4,
        },
        // because of the output scaling we need to take half of the values
        {
            (1*2 + 2*1 + 3*0 + 4*6 + 10)/2,
            (2*2 + 0*1 + 4*0 + 0*6 + 10)/2,
            (3*2 + 4*1 + 0*0 + 0*6 + 10)/2,
            (4*2 + 0*1 + 0*0 + 0*6 + 10)/2
        });
}

struct DynamicConv2DWithBiasesFixture : Conv2DWithBiasesFixture
{
    DynamicConv2DWithBiasesFixture()
    : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]",    // inputShape
                              "[ ]",               // outputShape
                              "[ 1, 2, 2, 1 ]",    // filterShape
                              "[ 2,1, 0,6 ]",      // filterData
                              "[ 1 ]",             // biasShape
                              "[ 10, 0, 0, 0 ]",   // biasData
                              "1")                 // stride w and h
    {}
};
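
// Here the output shape is deliberately left empty ("[ ]"), so the parser cannot read it
// from the model and has to infer the output dimensions from the input, filter, stride
// and padding instead. The expected values are the same as in the static
// ParseConv2DWithBias test above; the trailing boolean passed to RunTest below presumably
// selects this dynamic-output comparison path (an assumption based on how it is used
// here rather than on the fixture's documentation).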

BOOST_FIXTURE_TEST_CASE( ParseDynamicConv2DWithBias, DynamicConv2DWithBiasesFixture )
{
    RunTest<4,
            armnn::DataType::QAsymmU8,
            armnn::DataType::QAsymmU8>(0,
                                       { { "inputTensor", { 1, 2, 3, 4, } } },
                                       { { "outputTensor", { (1*2 + 2*1 + 3*0 + 4*6 + 10)/2,
                                                             (2*2 + 0*1 + 4*0 + 0*6 + 10)/2,
                                                             (3*2 + 4*1 + 0*0 + 0*6 + 10)/2,
                                                             (4*2 + 0*1 + 0*0 + 0*6 + 10)/2 } } },
                                       true);
}

struct Conv2DShapeTestFixture : Conv2DWithBiasesFixture
{
    static std::string GenerateInts(unsigned int n)
    {
        std::stringstream ss;
        ss << " [ ";
        for (unsigned int i = 0; i < n; ++i)
        {
            if (i > 0)
            {
                ss << " , ";
            }
            ss << " " << (i % 256);
        }
        ss << " ] ";
        return ss.str();
    }

    Conv2DShapeTestFixture()
    : Conv2DWithBiasesFixture("[ 1, 224, 224, 3 ]",    // inputShape
                              "[ 1, 112, 112, 32 ]",   // outputShape
                              "[ 32, 3, 3, 3 ]",       // filterShape
                              GenerateInts(32*3*3*3),  // filterData
                              "[ 32 ]",                // biasShape
                              GenerateInts(32*4),      // biasData
                              "2")                     // stride w and h
    {}
};
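
// The point of this fixture is purely the shape computation: a 224x224 input with a 3x3
// filter, SAME padding and stride 2 should give ceil(224 / 2) = 112 in each spatial
// dimension, i.e. an output of [ 1, 112, 112, 32 ]. GenerateInts just fills the filter
// and bias buffers with arbitrary byte values so the model is well formed; the empty
// test body below only needs parsing and network setup in the fixture to succeed.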

BOOST_FIXTURE_TEST_CASE( ParseConv2D_112x112_out, Conv2DShapeTestFixture )
{
}

struct ReluConv2DWithBiasesFixture : Conv2DWithBiasesFixture
{
    ReluConv2DWithBiasesFixture()
    : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]",    // inputShape
                              "[ 1, 2, 2, 1 ]",    // outputShape
                              "[ 1, 2, 2, 1 ]",    // filterShape
                              "[ 2,1, 0,6 ]",      // filterData
                              "[ 1 ]",             // biasShape
                              "[ 16, 0, 0, 0 ]",   // biasData
                              "1",                 // stride w and h
                              "RELU",              // activation
                              "1.0",               // filter scale
                              "4",                 // filter zero point
                              "2.0",               // output scale
                              "20")                // output zero point
    {}
};
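
// Worked example for the first output element below, derived from the fixture parameters
// above and shown only to make the expected-value expressions easier to read: the filter
// zero point is 4, so the effective weights are (2-4, 1-4, 0-4, 6-4); the accumulator is
// 1*(-2) + 2*(-3) + 4*(-4) + 8*2 + 16 = 8; dividing by the output scale of 2 and adding
// the output zero point of 20 gives 24, and RELU then clamps the result to be no less
// than the zero point, so the expected value is max(20, 24) = 24.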

BOOST_FIXTURE_TEST_CASE( ParseConv2DAndReluWithBias, ReluConv2DWithBiasesFixture )
{
    uint8_t bias    = 16;
    uint8_t outZero = 20;
    uint8_t fz      = 4; // filter zero point

    RunTest<4, armnn::DataType::QAsymmU8>(
        0,
        {
            1, 2,
            4, 8,
        },
        // factors to consider:
        // - the filter zero point is non zero, hence the (x-fz)
        // - the output scale is 2 hence the /2
        // - output zero point is non zero, hence the +outZero
        // - RELU cuts negative values and then we add the output zero point
        {
            std::max(outZero, static_cast<uint8_t>((1*(2-fz) + 2*(1-fz) + 4*(0-fz) + 8*(6-fz) + bias)/2 + outZero)),
            std::max(outZero, static_cast<uint8_t>((2*(2-fz) + 0*(1-fz) + 8*(0-fz) + 0*(6-fz) + bias)/2 + outZero)),
            std::max(outZero, static_cast<uint8_t>((4*(2-fz) + 8*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero)),
            std::max(outZero, static_cast<uint8_t>((8*(2-fz) + 0*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero))
        });
}

struct Relu6Conv2DWithBiasesFixture : Conv2DWithBiasesFixture
{
    Relu6Conv2DWithBiasesFixture()
    : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]",    // inputShape
                              "[ 1, 2, 2, 1 ]",    // outputShape
                              "[ 1, 2, 2, 1 ]",    // filterShape
                              "[ 2,1, 0,6 ]",      // filterData
                              "[ 1 ]",             // biasShape
                              "[ 0, 0, 0, 0 ]",    // biasData
                              "1",                 // stride w and h
                              "RELU6",             // activation
                              "1.0",               // filter scale
                              "0",                 // filter zero point
                              "2.0",               // output scale
                              "0")                 // output zero point
    {}
};
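
// RELU6 clamps the real-valued output at +6, which in quantized space (output scale 2.0,
// zero point 0) corresponds to 6 / 2 = 3 - hence the relu6Min cap used below. For the
// first element, the raw convolution gives (1*2 + 2*1 + 4*0 + 1*6) / 2 = 5, so the
// expected quantized output is min(3, 5) = 3.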

BOOST_FIXTURE_TEST_CASE( ParseConv2DAndRelu6WithBias, Relu6Conv2DWithBiasesFixture )
{
    uint8_t relu6Min = 6 / 2; // divide by output scale

    RunTest<4, armnn::DataType::QAsymmU8>(
        0,
        {
            1, 2,
            4, 1,
        },
        // factors to consider:
        // - the output scale is 2 hence the /2
        // - RELU6 cuts output values at +6
        {
            std::min(relu6Min, static_cast<uint8_t>((1*2 + 2*1 + 4*0 + 1*6)/2)),
            std::min(relu6Min, static_cast<uint8_t>((2*2 + 0*1 + 1*0 + 0*6)/2)),
            std::min(relu6Min, static_cast<uint8_t>((4*2 + 1*1 + 0*0 + 0*6)/2)),
            std::min(relu6Min, static_cast<uint8_t>((1*2 + 0*1 + 0*0 + 0*6)/2))
        });
}

struct PerChannelConv2DFixture : public ParserFlatbuffersFixture
{
    explicit PerChannelConv2DFixture()
    {
        m_JsonString = R"(
            {
                "version": 3,
                "operator_codes": [
                    {
                        "builtin_code": "CONV_2D",
                        "version": 3
                    }
                ],
                "subgraphs": [
                    {
                        "tensors": [
                            {
                                "shape": [ 1, 4, 4, 2 ],
                                "type": "INT8",
                                "buffer": 1,
                                "name": "input",
                                "quantization": {
                                    "min": [ -50.0 ],
                                    "max": [ 49.0 ],
                                    "scale": [ 0.388235 ],
                                    "zero_point": [ 1 ],
                                    "details_type": "NONE",
                                    "quantized_dimension": 0
                                },
                                "is_variable": false
                            },
                            {
                                "shape": [ 4 ],
                                "type": "INT32",
                                "buffer": 2,
                                "name": "model/conv2d/Conv2D",
                                "quantization": {
                                    "scale": [ 0.001523, 0.001197, 0.001517, 0.001364 ],
                                    "zero_point": [ 0, 0, 0, 0 ],
                                    "details_type": "NONE",
                                    "quantized_dimension": 0
                                },
                                "is_variable": false
                            },
                            {
                                "shape": [ 4, 2, 2, 2 ],
                                "type": "INT8",
                                "buffer": 3,
                                "name": "model/conv2d/Conv2D1",
                                "quantization": {
                                    "min": [ -0.498056, -0.362561, -0.307959, -0.207799 ],
                                    "max": [ 0.339136, 0.391629, 0.496193, 0.446191 ],
                                    "scale": [ 0.003922, 0.003084, 0.003907, 0.003513 ],
                                    "zero_point": [ 0, 0, 0, 0 ],
                                    "details_type": "NONE",
                                    "quantized_dimension": 0
                                },
                                "is_variable": false
                            },
                            {
                                "shape": [ 1, 4, 4, 4 ],
                                "type": "INT8",
                                "buffer": 4,
                                "name": "Identity",
                                "quantization": {
                                    "min": [ -66.578751 ],
                                    "max": [ 70.137619 ],
                                    "scale": [ 0.536143 ],
                                    "zero_point": [ -4 ],
                                    "details_type": "NONE",
                                    "quantized_dimension": 0
                                },
                                "is_variable": false
                            }
                        ],
                        "inputs": [ 0 ],
                        "outputs": [ 3 ],
                        "operators": [
                            {
                                "opcode_index": 0,
                                "inputs": [ 0, 2, 1 ],
                                "outputs": [ 3 ],
                                "builtin_options_type": "Conv2DOptions",
                                "builtin_options": {
                                    "padding": "SAME",
                                    "stride_w": 1,
                                    "stride_h": 1,
                                    "fused_activation_function": "NONE",
                                    "dilation_w_factor": 1,
                                    "dilation_h_factor": 1
                                },
                                "custom_options_format": "FLEXBUFFERS"
                            }
                        ],
                        "name": "main"
                    }
                ],
                "description": "MLIR Converted.",
                "buffers": [
                    { },
                    { },
                    { "data": [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ] },
                    { "data": [ 157, 201,  86, 129,  17,  33, 209,  13,
                                 76, 249, 127, 138,  35,  18, 250, 233,
                                 15, 205,  98, 127,  68, 196, 246, 177,
                                 65, 197, 230, 246, 127,  66, 212,  30 ] },
                    { },
                    { "data": [ 49, 46, 53, 46, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ] }
                ],
                "metadata": [
                    {
                        "name": "min_runtime_version",
                        "buffer": 5
                    }
                ]
            }
        )";
        SetupSingleInputSingleOutput("input", "Identity");
    }
};
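
// What makes this fixture interesting is the per-channel (per-axis) quantization of the
// weights: the filter tensor carries one scale per output channel ("quantized_dimension": 0),
// so, under TfLite's per-channel scheme, each of the four output channels is requantized
// with its own inputScale * filterScale[channel] / outputScale factor rather than a single
// scalar. The operator's inputs are [ 0, 2, 1 ], i.e. tensor 2 is the filter and tensor 1
// is the bias (four INT32 zeros stored in buffer 2).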

BOOST_FIXTURE_TEST_CASE( ParsePerChannelConv2D, PerChannelConv2DFixture )
{
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        {
            -11, 40,-26, 11,-28,  8,  0, -8,
            -10, 34, 47,  0,-33,-14, 28, 35,
              6,-28,-26,  8, 13, 33,-31,-41,
             31,-20,-31,-16,  8,-18,-44,  0
        },
        {
            -21,-17,-23,-14, -1,-14,  1,  9,
              1,-12,-22,-23,  2, -1, -3, 12,
              7,  6,  8,-13,-21, -6,-31,  0,
              9, -6, 24,  0,-22, -4, -7,-22,
             -7, -9,  9, 11,-11,-16,  9,-27,
             -1,  0,-26,  0,  9,-12, -8,-18,
            -11, -3,-15,  7, 16, -2, -8, -7,
            -14,-15,-14,  3,  9,-12, -6,-11
        });
}

BOOST_AUTO_TEST_SUITE_END()