//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <sstream>

TEST_SUITE("TensorflowLiteParser_Conv2D")
{
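// Minimal TfLite model with a single CONV_2D operator: a 1x3x3x1 UINT8 input convolved with a
// 1x3x3x1 constant filter, VALID padding and stride 1, giving a 1x1x1x1 output whose scale (2.0)
// is twice the input/filter scale.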
struct SimpleConv2DFixture : public ParserFlatbuffersFixture
{
    explicit SimpleConv2DFixture()
    {
        m_JsonString = R"(
            {
                "version": 3,
                "operator_codes": [ { "builtin_code": "CONV_2D" } ],
                "subgraphs": [ {
                    "tensors": [
                        {
                            "shape": [ 1, 3, 3, 1 ],
                            "type": "UINT8",
                            "buffer": 0,
                            "name": "inputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        },
                        {
                            "shape": [ 1, 1, 1, 1 ],
                            "type": "UINT8",
                            "buffer": 1,
                            "name": "outputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 511.0 ],
                                "scale": [ 2.0 ],
                                "zero_point": [ 0 ],
                            }
                        },
                        {
                            "shape": [ 1, 3, 3, 1 ],
                            "type": "UINT8",
                            "buffer": 2,
                            "name": "filterTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        }
                    ],
                    "inputs": [ 0 ],
                    "outputs": [ 1 ],
                    "operators": [
                        {
                            "opcode_index": 0,
                            "inputs": [ 0, 2 ],
                            "outputs": [ 1 ],
                            "builtin_options_type": "Conv2DOptions",
                            "builtin_options": {
                                "padding": "VALID",
                                "stride_w": 1,
                                "stride_h": 1,
                                "fused_activation_function": "NONE"
                            },
                            "custom_options_format": "FLEXBUFFERS"
                        }
                    ],
                } ],
                "buffers" : [
                    { },
                    { },
                    { "data": [ 2,1,0, 6,2,1, 4,1,2 ], },
                    { },
                ]
            }
        )";
        SetupSingleInputSingleOutput("inputTensor", "outputTensor");
    }
};

TEST_CASE_FIXTURE(SimpleConv2DFixture, "ParseSimpleConv2D")
{
    RunTest<4, armnn::DataType::QAsymmU8>(
        0,
        {
            1, 2, 3,
            4, 5, 6,
            7, 8, 9,
        },
        // because of the output scaling we need to take half of the values
        {
            (1*2 + 2*1 + 3*0 +
             4*6 + 5*2 + 6*1 +
             7*4 + 8*1 + 9*2) /2
        });
}

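// Parameterised fixture for CONV_2D models with a bias input. Shapes, constant data, strides,
// fused activation and the filter/output quantization parameters are spliced into the JSON, so
// the derived fixtures below differ only in the arguments they pass to this constructor.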
struct Conv2DWithBiasesFixture : public ParserFlatbuffersFixture
{
    explicit Conv2DWithBiasesFixture(const std::string & inputShape,
                                     const std::string & outputShape,
                                     const std::string & filterShape,
                                     const std::string & filterData,
                                     const std::string & biasShape,
                                     const std::string & biasData,
                                     const std::string & strides,
                                     const std::string & activation="NONE",
                                     const std::string & filterScale="1.0",
                                     const std::string & filterZeroPoint="0",
                                     const std::string & outputScale="2.0",
                                     const std::string & outputZeroPoint="0")
    {
        m_JsonString = R"(
            {
                "version": 3,
                "operator_codes": [ { "builtin_code": "CONV_2D" } ],
                "subgraphs": [ {
                    "tensors": [
                        {
                            "shape": )" + inputShape + R"(,
                            "type": "UINT8",
                            "buffer": 0,
                            "name": "inputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        },
                        {
                            "shape": )" + outputShape + R"(,
                            "type": "UINT8",
                            "buffer": 1,
                            "name": "outputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 511.0 ],
                                "scale": [ )" + outputScale + R"( ],
                                "zero_point": [ )" + outputZeroPoint + R"( ],
                            }
                        },
                        {
                            "shape": )" + filterShape + R"( ,
                            "type": "UINT8",
                            "buffer": 2,
                            "name": "filterTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ )" + filterScale + R"( ],
                                "zero_point": [ )" + filterZeroPoint + R"( ],
                            }
                        },
                        {
                            "shape": )" + biasShape + R"( ,
                            "type": "INT32",
                            "buffer": 3,
                            "name": "biasTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        }
                    ],
                    "inputs": [ 0 ],
                    "outputs": [ 1 ],
                    "operators": [
                        {
                            "opcode_index": 0,
                            "inputs": [ 0, 2, 3 ],
                            "outputs": [ 1 ],
                            "builtin_options_type": "Conv2DOptions",
                            "builtin_options": {
                                "padding": "SAME",
                                "stride_w": )" + strides + R"(,
                                "stride_h": )" + strides + R"(,
                                "fused_activation_function": )" + activation + R"(
                            },
                            "custom_options_format": "FLEXBUFFERS"
                        }
                    ],
                } ],
                "buffers" : [
                    { },
                    { },
                    { "data": )" + filterData + R"(, },
                    { "data": )" + biasData + R"(, },
                ]
            }
        )";
        SetupSingleInputSingleOutput("inputTensor", "outputTensor");
    }
};

struct SimpleConv2DWithBiasesFixture : Conv2DWithBiasesFixture
{
    SimpleConv2DWithBiasesFixture()
    : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]",    // inputShape
                              "[ 1, 2, 2, 1 ]",    // outputShape
                              "[ 1, 2, 2, 1 ]",    // filterShape
                              "[ 2,1, 0,6 ]",      // filterData
                              "[ 1 ]",             // biasShape
                              "[ 10, 0, 0, 0 ]",   // biasData
                              "1")                 // stride w and h
    {}
};

TEST_CASE_FIXTURE(SimpleConv2DWithBiasesFixture, "ParseConv2DWithBias")
{
    RunTest<4, armnn::DataType::QAsymmU8>(
        0,
        {
            1, 2,
            3, 4,
        },
        // because of the output scaling we need to take half of the values
        {
            (1*2 + 2*1 + 3*0 + 4*6 + 10)/2,
            (2*2 + 0*1 + 4*0 + 0*6 + 10)/2,
            (3*2 + 4*1 + 0*0 + 0*6 + 10)/2,
            (4*2 + 0*1 + 0*0 + 0*6 + 10)/2
        });
}

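// Same network as SimpleConv2DWithBiasesFixture, but the output shape is left empty ("[ ]") so the
// parser has to infer it; the expected values are identical to ParseConv2DWithBias above.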
struct DynamicConv2DWithBiasesFixture : Conv2DWithBiasesFixture
{
    DynamicConv2DWithBiasesFixture()
    : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]",    // inputShape
                              "[ ]",               // outputShape
                              "[ 1, 2, 2, 1 ]",    // filterShape
                              "[ 2,1, 0,6 ]",      // filterData
                              "[ 1 ]",             // biasShape
                              "[ 10, 0, 0, 0 ]",   // biasData
                              "1")                 // stride w and h
    {}
};

TEST_CASE_FIXTURE(DynamicConv2DWithBiasesFixture, "ParseDynamicConv2DWithBias")
{
    RunTest<4,
            armnn::DataType::QAsymmU8,
            armnn::DataType::QAsymmU8>(0,
                                       { { "inputTensor", { 1, 2, 3, 4, } } },
                                       { { "outputTensor", { (1*2 + 2*1 + 3*0 + 4*6 + 10)/2,
                                                             (2*2 + 0*1 + 4*0 + 0*6 + 10)/2,
                                                             (3*2 + 4*1 + 0*0 + 0*6 + 10)/2,
                                                             (4*2 + 0*1 + 0*0 + 0*6 + 10)/2} } },
                                       true);
}

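// Larger convolution with realistic shapes: a 1x224x224x3 input and 32 3x3x3 filters with stride 2
// and SAME padding give a 1x112x112x32 output. GenerateInts() simply emits a JSON array of n
// values (i % 256) so the constant buffers have the right length; the values are not checked.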
struct Conv2DShapeTestFixture : Conv2DWithBiasesFixture
{
    static std::string GenerateInts(unsigned int n)
    {
        std::stringstream ss;
        ss << " [ ";
        for( unsigned int i=0; i<n; ++i )
        {
            if (i > 0 )
            {
                ss << " , ";
            }
            ss << " " << (i%256);
        }
        ss << " ] ";
        return ss.str();
    }

    Conv2DShapeTestFixture()
    : Conv2DWithBiasesFixture("[ 1, 224, 224, 3 ]",    // inputShape
                              "[ 1, 112, 112, 32 ]",   // outputShape
                              "[ 32, 3, 3, 3 ]",       // filterShape
                              GenerateInts(32*3*3*3),  // filterData
                              "[ 32 ]",                // biasShape
                              GenerateInts(32*4),      // biasData
                              "2")                     // stride w and h
    {}
};

TEST_CASE_FIXTURE(Conv2DShapeTestFixture, "ParseConv2D_112x112_out")
{
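    // The fixture constructor has already parsed the model and wired up the single input and
    // output, so an empty body is enough to check that a convolution of this size parses cleanly.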
}

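// Variant with a fused RELU activation and non-zero filter (4) and output (20) zero points; the
// expected values below are clamped at the output zero point.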
struct ReluConv2DWithBiasesFixture : Conv2DWithBiasesFixture
{
    ReluConv2DWithBiasesFixture()
    : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]",    // inputShape
                              "[ 1, 2, 2, 1 ]",    // outputShape
                              "[ 1, 2, 2, 1 ]",    // filterShape
                              "[ 2,1, 0,6 ]",      // filterData
                              "[ 1 ]",             // biasShape
                              "[ 16, 0, 0, 0 ]",   // biasData
                              "1",                 // stride w and h
                              "RELU",              // activation
                              "1.0",               // filter scale
                              "4",                 // filter zero point
                              "2.0",               // output scale
                              "20")                // output zero point
    {}
};

TEST_CASE_FIXTURE(ReluConv2DWithBiasesFixture, "ParseConv2DAndReluWithBias")
{
    uint8_t bias = 16;
    uint8_t outZero = 20;
    uint8_t fz = 4; // filter zero point

    RunTest<4, armnn::DataType::QAsymmU8>(
        0,
        {
            1, 2,
            4, 8,
        },
        // factors to consider:
        // - the filter zero point is non zero, hence the (x-fz)
        // - the output scale is 2 hence the /2
        // - output zero point is non zero, hence the +outZero
        // - RELU cuts negative values and then we add the output zero point
        {
            std::max(outZero, static_cast<uint8_t>((1*(2-fz) + 2*(1-fz) + 4*(0-fz) + 8*(6-fz) + bias)/2 + outZero)),
            std::max(outZero, static_cast<uint8_t>((2*(2-fz) + 0*(1-fz) + 8*(0-fz) + 0*(6-fz) + bias)/2 + outZero)),
            std::max(outZero, static_cast<uint8_t>((4*(2-fz) + 8*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero)),
            std::max(outZero, static_cast<uint8_t>((8*(2-fz) + 0*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero))
        });
}

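// Variant with a fused RELU6 activation: real outputs are capped at 6, which is 3 in the quantized
// domain because the output scale is 2.0.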
struct Relu6Conv2DWithBiasesFixture : Conv2DWithBiasesFixture
{
    Relu6Conv2DWithBiasesFixture()
    : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]",    // inputShape
                              "[ 1, 2, 2, 1 ]",    // outputShape
                              "[ 1, 2, 2, 1 ]",    // filterShape
                              "[ 2,1, 0,6 ]",      // filterData
                              "[ 1 ]",             // biasShape
                              "[ 0, 0, 0, 0 ]",    // biasData
                              "1",                 // stride w and h
                              "RELU6",             // activation
                              "1.0",               // filter scale
                              "0",                 // filter zero point
                              "2.0",               // output scale
                              "0")                 // output zero point
    {}
};

TEST_CASE_FIXTURE(Relu6Conv2DWithBiasesFixture, "ParseConv2DAndRelu6WithBias")
{
    uint8_t relu6Min = 6 / 2; // divide by output scale

    RunTest<4, armnn::DataType::QAsymmU8>(
        0,
        {
            1, 2,
            4, 1,
        },
        // factors to consider:
        // - the output scale is 2 hence the /2
        // - RELU6 cuts output values at +6
        {
            std::min(relu6Min, static_cast<uint8_t>((1*2 + 2*1 + 4*0 + 1*6)/2)),
            std::min(relu6Min, static_cast<uint8_t>((2*2 + 0*1 + 1*0 + 0*6)/2)),
            std::min(relu6Min, static_cast<uint8_t>((4*2 + 1*1 + 0*0 + 0*6)/2)),
            std::min(relu6Min, static_cast<uint8_t>((1*2 + 0*1 + 0*0 + 0*6)/2))
        });
}

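// CONV_2D model with per-channel (per-axis) quantization: the INT8 filter tensor carries one scale
// per output channel (quantized_dimension 0), while the input and output tensors use ordinary
// per-tensor quantization. The INT32 bias tensor has matching per-channel scales.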
struct PerChannelConv2DFixture : public ParserFlatbuffersFixture
{
    explicit PerChannelConv2DFixture()
    {
        m_JsonString = R"(
            {
                "version": 3,
                "operator_codes": [
                    {
                        "builtin_code": "CONV_2D",
                        "version": 3
                    }
                ],
                "subgraphs": [
                    {
                        "tensors": [
                            {
                                "shape": [ 1, 4, 4, 2 ],
                                "type": "INT8",
                                "buffer": 1,
                                "name": "input",
                                "quantization": {
                                    "min": [ -50.0 ],
                                    "max": [ 49.0 ],
                                    "scale": [ 0.388235 ],
                                    "zero_point": [ 1 ],
                                    "details_type": "NONE",
                                    "quantized_dimension": 0
                                },
                                "is_variable": false
                            },
                            {
                                "shape": [ 4 ],
                                "type": "INT32",
                                "buffer": 2,
                                "name": "model/conv2d/Conv2D",
                                "quantization": {
                                    "scale": [ 0.001523, 0.001197, 0.001517, 0.001364 ],
                                    "zero_point": [ 0, 0, 0, 0 ],
                                    "details_type": "NONE",
                                    "quantized_dimension": 0
                                },
                                "is_variable": false
                            },
                            {
                                "shape": [ 4, 2, 2, 2 ],
                                "type": "INT8",
                                "buffer": 3,
                                "name": "model/conv2d/Conv2D1",
                                "quantization": {
                                    "min": [ -0.498056, -0.362561, -0.307959, -0.207799 ],
                                    "max": [ 0.339136, 0.391629, 0.496193, 0.446191 ],
                                    "scale": [ 0.003922, 0.003084, 0.003907, 0.003513 ],
                                    "zero_point": [ 0, 0, 0, 0 ],
                                    "details_type": "NONE",
                                    "quantized_dimension": 0
                                },
                                "is_variable": false
                            },
                            {
                                "shape": [ 1, 4, 4, 4 ],
                                "type": "INT8",
                                "buffer": 4,
                                "name": "Identity",
                                "quantization": {
                                    "min": [ -66.578751 ],
                                    "max": [ 70.137619 ],
                                    "scale": [ 0.536143 ],
                                    "zero_point": [ -4 ],
                                    "details_type": "NONE",
                                    "quantized_dimension": 0
                                },
                                "is_variable": false
                            }
                        ],
                        "inputs": [ 0 ],
                        "outputs": [ 3 ],
                        "operators": [
                            {
                                "opcode_index": 0,
                                "inputs": [ 0, 2, 1 ],
                                "outputs": [ 3 ],
                                "builtin_options_type": "Conv2DOptions",
                                "builtin_options": {
                                    "padding": "SAME",
                                    "stride_w": 1,
                                    "stride_h": 1,
                                    "fused_activation_function": "NONE",
                                    "dilation_w_factor": 1,
                                    "dilation_h_factor": 1
                                },
                                "custom_options_format": "FLEXBUFFERS"
                            }
                        ],
                        "name": "main"
                    }
                ],
                "description": "MLIR Converted.",
                "buffers": [
                    { },
                    { },
                    { "data": [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ] },
                    { "data": [ 157, 201, 86, 129, 17, 33, 209, 13,
                                76, 249, 127, 138, 35, 18, 250, 233,
                                15, 205, 98, 127, 68, 196, 246, 177,
                                65, 197, 230, 246, 127, 66, 212, 30 ] },
                    { },
                    { "data": [ 49, 46, 53, 46, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ] }
                ],
                "metadata": [
                    {
                        "name": "min_runtime_version",
                        "buffer": 5
                    }
                ]
            }
        )";
        SetupSingleInputSingleOutput("input", "Identity");
    }
};

TEST_CASE_FIXTURE(PerChannelConv2DFixture, "ParsePerChannelConv2D")
{
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        {
            -11, 40,-26, 11,-28,  8,  0, -8,
            -10, 34, 47,  0,-33,-14, 28, 35,
              6,-28,-26,  8, 13, 33,-31,-41,
             31,-20,-31,-16,  8,-18,-44,  0
        },
        {
            -21,-17,-23,-14, -1,-14,  1,  9,
              1,-12,-22,-23,  2, -1, -3, 12,
              7,  6,  8,-13,-21, -6,-31,  0,
              9, -6, 24,  0,-22, -4, -7,-22,
             -7, -9,  9, 11,-11,-16,  9,-27,
             -1,  0,-26,  0,  9,-12, -8,-18,
            -11, -3,-15,  7, 16, -2, -8, -7,
            -14,-15,-14,  3,  9,-12, -6,-11
        });
}

}