//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ParserFlatbuffersFixture.hpp"
#include <sstream>

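// Each fixture below describes a small TFLite Conv2D model as JSON in m_JsonString.
// ParserFlatbuffersFixture builds a FlatBuffer model from that description and loads it through
// the TfLite parser; RunTest then feeds the input data through the network and compares the
// result against the expected output values.
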
TEST_SUITE("TensorflowLiteParser_Conv2D")
{
struct SimpleConv2DFixture : public ParserFlatbuffersFixture
{
    explicit SimpleConv2DFixture()
    {
        m_JsonString = R"(
            {
                "version": 3,
                "operator_codes": [ { "builtin_code": "CONV_2D" } ],
                "subgraphs": [ {
                    "tensors": [
                        {
                            "shape": [ 1, 3, 3, 1 ],
                            "type": "UINT8",
                            "buffer": 0,
                            "name": "inputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        },
                        {
                            "shape": [ 1, 1, 1, 1 ],
                            "type": "UINT8",
                            "buffer": 1,
                            "name": "outputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 511.0 ],
                                "scale": [ 2.0 ],
                                "zero_point": [ 0 ],
                            }
                        },
                        {
                            "shape": [ 1, 3, 3, 1 ],
                            "type": "UINT8",
                            "buffer": 2,
                            "name": "filterTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        }
                    ],
                    "inputs": [ 0 ],
                    "outputs": [ 1 ],
                    "operators": [
                        {
                            "opcode_index": 0,
                            "inputs": [ 0, 2 ],
                            "outputs": [ 1 ],
                            "builtin_options_type": "Conv2DOptions",
                            "builtin_options": {
                                "padding": "VALID",
                                "stride_w": 1,
                                "stride_h": 1,
                                "fused_activation_function": "NONE"
                            },
                            "custom_options_format": "FLEXBUFFERS"
                        }
                    ],
                } ],
                "buffers" : [
                    { },
                    { },
                    { "data": [ 2,1,0, 6,2,1, 4,1,2 ], },
                    { },
                ]
            }
        )";
        SetupSingleInputSingleOutput("inputTensor", "outputTensor");
    }
};

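// With VALID padding, stride 1 and a 3x3 filter over the 3x3 input, the convolution produces
// a single output value: the dot product of the input with the filter weights, halved by the
// output scale of 2.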
TEST_CASE_FIXTURE(SimpleConv2DFixture, "ParseSimpleConv2D")
{
    RunTest<4, armnn::DataType::QAsymmU8>(
        0,
        {
            1, 2, 3,
            4, 5, 6,
            7, 8, 9,
        },
        // Because the output scale is 2.0 we expect half of the accumulated value.
        {
            (1*2 + 2*1 + 3*0 +
             4*6 + 5*2 + 6*1 +
             7*4 + 8*1 + 9*2) /2
        });
}

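// Parameterised fixture for Conv2D with a bias input. The derived fixtures below supply the
// tensor shapes, filter and bias data, strides, fused activation and the quantization
// parameters of the filter and output tensors as JSON fragments.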
struct Conv2DWithBiasesFixture : public ParserFlatbuffersFixture
{
    explicit Conv2DWithBiasesFixture(const std::string& inputShape,
                                     const std::string& outputShape,
                                     const std::string& filterShape,
                                     const std::string& filterData,
                                     const std::string& biasShape,
                                     const std::string& biasData,
                                     const std::string& strides,
                                     const std::string& activation = "NONE",
                                     const std::string& filterScale = "1.0",
                                     const std::string& filterZeroPoint = "0",
                                     const std::string& outputScale = "2.0",
                                     const std::string& outputZeroPoint = "0",
                                     const std::string& dataType = "UINT8",
                                     const std::string& filterDataType = "UINT8",
                                     const std::string& biasDataType = "INT32")
    {
        m_JsonString = R"(
            {
                "version": 3,
                "operator_codes": [ { "builtin_code": "CONV_2D" } ],
                "subgraphs": [ {
                    "tensors": [
                        {
                            "shape": )" + inputShape + R"(,
                            "type": )" + dataType + R"(,
                            "buffer": 0,
                            "name": "inputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        },
                        {
                            "shape": )" + outputShape + R"(,
                            "type": )" + dataType + R"(,
                            "buffer": 1,
                            "name": "outputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 511.0 ],
                                "scale": [ )" + outputScale + R"( ],
                                "zero_point": [ )" + outputZeroPoint + R"( ],
                            }
                        },
                        {
                            "shape": )" + filterShape + R"(,
                            "type": )" + filterDataType + R"(,
                            "buffer": 2,
                            "name": "filterTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ )" + filterScale + R"( ],
                                "zero_point": [ )" + filterZeroPoint + R"( ],
                            }
                        },
                        {
                            "shape": )" + biasShape + R"(,
                            "type": )" + biasDataType + R"(,
                            "buffer": 3,
                            "name": "biasTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        }
                    ],
                    "inputs": [ 0 ],
                    "outputs": [ 1 ],
                    "operators": [
                        {
                            "opcode_index": 0,
                            "inputs": [ 0, 2, 3 ],
                            "outputs": [ 1 ],
                            "builtin_options_type": "Conv2DOptions",
                            "builtin_options": {
                                "padding": "SAME",
                                "stride_w": )" + strides + R"(,
                                "stride_h": )" + strides + R"(,
                                "fused_activation_function": )" + activation + R"(
                            },
                            "custom_options_format": "FLEXBUFFERS"
                        }
                    ],
                } ],
                "buffers" : [
                    { },
                    { },
                    { "data": )" + filterData + R"(, },
                    { "data": )" + biasData + R"(, },
                ]
            }
        )";
        SetupSingleInputSingleOutput("inputTensor", "outputTensor");
    }
};

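// 2x2 input, 2x2 filter, SAME padding, stride 1: each output pixel anchors the filter window
// at that pixel, taps that fall outside the input contribute zero, the bias of 10 is added,
// and the sum is halved by the output scale of 2.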
struct SimpleConv2DWithBiasesFixture : Conv2DWithBiasesFixture
{
    SimpleConv2DWithBiasesFixture()
    : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]",    // inputShape
                              "[ 1, 2, 2, 1 ]",    // outputShape
                              "[ 1, 2, 2, 1 ]",    // filterShape
                              "[ 2,1, 0,6 ]",      // filterData
                              "[ 1 ]",             // biasShape
                              "[ 10, 0, 0, 0 ]",   // biasData
                              "1")                 // stride w and h
    {}
};

TEST_CASE_FIXTURE(SimpleConv2DWithBiasesFixture, "ParseConv2DWithBias")
{
    RunTest<4, armnn::DataType::QAsymmU8>(
        0,
        {
            1, 2,
            3, 4,
        },
        // Because the output scale is 2.0 we expect half of the accumulated values.
        {
            (1*2 + 2*1 + 3*0 + 4*6 + 10)/2,
            (2*2 + 0*1 + 4*0 + 0*6 + 10)/2,
            (3*2 + 4*1 + 0*0 + 0*6 + 10)/2,
            (4*2 + 0*1 + 0*0 + 0*6 + 10)/2
        });
}

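// Same convolution as above, but the output tensor shape is left empty ("[ ]") so the parser
// has to infer it; the trailing boolean passed to RunTest selects the dynamic-output-shape path.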
struct DynamicConv2DWithBiasesFixture : Conv2DWithBiasesFixture
{
    DynamicConv2DWithBiasesFixture()
    : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]",    // inputShape
                              "[ ]",               // outputShape
                              "[ 1, 2, 2, 1 ]",    // filterShape
                              "[ 2,1, 0,6 ]",      // filterData
                              "[ 1 ]",             // biasShape
                              "[ 10, 0, 0, 0 ]",   // biasData
                              "1")                 // stride w and h
    {}
};

TEST_CASE_FIXTURE(DynamicConv2DWithBiasesFixture, "ParseDynamicConv2DWithBias")
{
    RunTest<4,
            armnn::DataType::QAsymmU8,
            armnn::DataType::QAsymmU8>(0,
                                       { { "inputTensor", { 1, 2, 3, 4, } } },
                                       { { "outputTensor", { (1*2 + 2*1 + 3*0 + 4*6 + 10)/2,
                                                             (2*2 + 0*1 + 4*0 + 0*6 + 10)/2,
                                                             (3*2 + 4*1 + 0*0 + 0*6 + 10)/2,
                                                             (4*2 + 0*1 + 0*0 + 0*6 + 10)/2 } } },
                                       true);
}

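// A MobileNet-like first layer: 224x224x3 input, 32 filters of 3x3x3, stride 2, 112x112x32
// output. The filter and bias data are just generated byte patterns; the interesting part is
// that the parser handles the larger shapes.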
struct Conv2DShapeTestFixture : Conv2DWithBiasesFixture
{
    static std::string GenerateInts(unsigned int n)
    {
        std::stringstream ss;
        ss << " [ ";
        for( unsigned int i=0; i<n; ++i )
        {
            if (i > 0 )
            {
                ss << " , ";
            }
            ss << " " << (i%256);
        }
        ss << " ] ";
        return ss.str();
    }

    Conv2DShapeTestFixture()
    : Conv2DWithBiasesFixture("[ 1, 224, 224, 3 ]",     // inputShape
                              "[ 1, 112, 112, 32 ]",    // outputShape
                              "[ 32, 3, 3, 3 ]",        // filterShape
                              GenerateInts(32*3*3*3),   // filterData
                              "[ 32 ]",                 // biasShape
                              GenerateInts(32*4),       // biasData
                              "2")                      // stride w and h
    {}
};

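// The fixture constructor already parses the model and creates the network via
// SetupSingleInputSingleOutput, so an empty test body is enough to verify the shapes above.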
TEST_CASE_FIXTURE(Conv2DShapeTestFixture, "ParseConv2D_112x112_out")
{
}

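// Exercises a fused RELU activation combined with a non-zero filter zero point and a non-zero
// output zero point.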
struct ReluConv2DWithBiasesFixture : Conv2DWithBiasesFixture
{
    ReluConv2DWithBiasesFixture()
    : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]",    // inputShape
                              "[ 1, 2, 2, 1 ]",    // outputShape
                              "[ 1, 2, 2, 1 ]",    // filterShape
                              "[ 2,1, 0,6 ]",      // filterData
                              "[ 1 ]",             // biasShape
                              "[ 16, 0, 0, 0 ]",   // biasData
                              "1",                 // stride w and h
                              "RELU",              // activation
                              "1.0",               // filter scale
                              "4",                 // filter zero point
                              "2.0",               // output scale
                              "20")                // output zero point
    {}
};

TEST_CASE_FIXTURE(ReluConv2DWithBiasesFixture, "ParseConv2DAndReluWithBias")
{
    uint8_t bias = 16;
    uint8_t outZero = 20;
    uint8_t fz = 4; // filter zero point

    RunTest<4, armnn::DataType::QAsymmU8>(
        0,
        {
            1, 2,
            4, 8,
        },
        // factors to consider:
        // - the filter zero point is non zero, hence the (x-fz)
        // - the output scale is 2 hence the /2
        // - output zero point is non zero, hence the +outZero
        // - RELU cuts negative values and then we add the output zero point
        {
            std::max(outZero, static_cast<uint8_t>((1*(2-fz) + 2*(1-fz) + 4*(0-fz) + 8*(6-fz) + bias)/2 + outZero)),
            std::max(outZero, static_cast<uint8_t>((2*(2-fz) + 0*(1-fz) + 8*(0-fz) + 0*(6-fz) + bias)/2 + outZero)),
            std::max(outZero, static_cast<uint8_t>((4*(2-fz) + 8*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero)),
            std::max(outZero, static_cast<uint8_t>((8*(2-fz) + 0*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero))
        });
}

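// Exercises a fused RELU6 activation; with an output scale of 2 the clamp at 6 corresponds to
// a quantized value of 3.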
struct Relu6Conv2DWithBiasesFixture : Conv2DWithBiasesFixture
{
    Relu6Conv2DWithBiasesFixture()
    : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]",    // inputShape
                              "[ 1, 2, 2, 1 ]",    // outputShape
                              "[ 1, 2, 2, 1 ]",    // filterShape
                              "[ 2,1, 0,6 ]",      // filterData
                              "[ 1 ]",             // biasShape
                              "[ 0, 0, 0, 0 ]",    // biasData
                              "1",                 // stride w and h
                              "RELU6",             // activation
                              "1.0",               // filter scale
                              "0",                 // filter zero point
                              "2.0",               // output scale
                              "0")                 // output zero point
    {}
};

TEST_CASE_FIXTURE(Relu6Conv2DWithBiasesFixture, "ParseConv2DAndRelu6WithBias")
{
    uint8_t relu6Min = 6 / 2; // divide by output scale

    RunTest<4, armnn::DataType::QAsymmU8>(
        0,
        {
            1, 2,
            4, 1,
        },
        // factors to consider:
        // - the output scale is 2 hence the /2
        // - RELU6 cuts output values at +6
        {
            std::min(relu6Min, static_cast<uint8_t>((1*2 + 2*1 + 4*0 + 1*6)/2)),
            std::min(relu6Min, static_cast<uint8_t>((2*2 + 0*1 + 1*0 + 0*6)/2)),
            std::min(relu6Min, static_cast<uint8_t>((4*2 + 1*1 + 0*0 + 0*6)/2)),
            std::min(relu6Min, static_cast<uint8_t>((1*2 + 0*1 + 0*0 + 0*6)/2))
        });
}

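// Conv2D model produced by the TFLite (MLIR) converter whose filter tensor is quantized per
// channel: one scale per output channel, with quantized_dimension 0. The bias is an INT32
// tensor with matching per-channel scales.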
struct PerChannelConv2DFixture : public ParserFlatbuffersFixture
{
    explicit PerChannelConv2DFixture()
    {
        m_JsonString = R"(
            {
                "version": 3,
                "operator_codes": [
                    {
                        "builtin_code": "CONV_2D",
                        "version": 3
                    }
                ],
                "subgraphs": [
                    {
                        "tensors": [
                            {
                                "shape": [ 1, 4, 4, 2 ],
                                "type": "INT8",
                                "buffer": 1,
                                "name": "input",
                                "quantization": {
                                    "min": [ -50.0 ],
                                    "max": [ 49.0 ],
                                    "scale": [ 0.388235 ],
                                    "zero_point": [ 1 ],
                                    "details_type": "NONE",
                                    "quantized_dimension": 0
                                },
                                "is_variable": false
                            },
                            {
                                "shape": [ 4 ],
                                "type": "INT32",
                                "buffer": 2,
                                "name": "model/conv2d/Conv2D",
                                "quantization": {
                                    "scale": [ 0.001523, 0.001197, 0.001517, 0.001364 ],
                                    "zero_point": [ 0, 0, 0, 0 ],
                                    "details_type": "NONE",
                                    "quantized_dimension": 0
                                },
                                "is_variable": false
                            },
                            {
                                "shape": [ 4, 2, 2, 2 ],
                                "type": "INT8",
                                "buffer": 3,
                                "name": "model/conv2d/Conv2D1",
                                "quantization": {
                                    "min": [ -0.498056, -0.362561, -0.307959, -0.207799 ],
                                    "max": [ 0.339136, 0.391629, 0.496193, 0.446191 ],
                                    "scale": [ 0.003922, 0.003084, 0.003907, 0.003513 ],
                                    "zero_point": [ 0, 0, 0, 0 ],
                                    "details_type": "NONE",
                                    "quantized_dimension": 0
                                },
                                "is_variable": false
                            },
                            {
                                "shape": [ 1, 4, 4, 4 ],
                                "type": "INT8",
                                "buffer": 4,
                                "name": "Identity",
                                "quantization": {
                                    "min": [ -66.578751 ],
                                    "max": [ 70.137619 ],
                                    "scale": [ 0.536143 ],
                                    "zero_point": [ -4 ],
                                    "details_type": "NONE",
                                    "quantized_dimension": 0
                                },
                                "is_variable": false
                            }
                        ],
                        "inputs": [ 0 ],
                        "outputs": [ 3 ],
                        "operators": [
                            {
                                "opcode_index": 0,
                                "inputs": [ 0, 2, 1 ],
                                "outputs": [ 3 ],
                                "builtin_options_type": "Conv2DOptions",
                                "builtin_options": {
                                    "padding": "SAME",
                                    "stride_w": 1,
                                    "stride_h": 1,
                                    "fused_activation_function": "NONE",
                                    "dilation_w_factor": 1,
                                    "dilation_h_factor": 1
                                },
                                "custom_options_format": "FLEXBUFFERS"
                            }
                        ],
                        "name": "main"
                    }
                ],
                "description": "MLIR Converted.",
                "buffers": [
                    { },
                    { },
                    {
                        "data": [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
                    },
                    {
                        "data": [
                            157, 201,  86, 129,  17,  33, 209,  13,
                             76, 249, 127, 138,  35,  18, 250, 233,
                             15, 205,  98, 127,  68, 196, 246, 177,
                             65, 197, 230, 246, 127,  66, 212,  30
                        ]
                    },
                    { },
                    {
                        "data": [ 49, 46, 53, 46, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
                    }
                ],
                "metadata": [
                    {
                        "name": "min_runtime_version",
                        "buffer": 5
                    }
                ]
            }
        )";
        SetupSingleInputSingleOutput("input", "Identity");
    }
};

TEST_CASE_FIXTURE(PerChannelConv2DFixture, "ParsePerChannelConv2D")
{
    RunTest<4, armnn::DataType::QAsymmS8>(
        0,
        {
            -11, 40,-26, 11,-28,  8,  0, -8,
            -10, 34, 47,  0,-33,-14, 28, 35,
              6,-28,-26,  8, 13, 33,-31,-41,
             31,-20,-31,-16,  8,-18,-44,  0
        },
        {
            -21,-17,-23,-14, -1,-14,  1,  9,
              1,-12,-22,-23,  2, -1, -3, 12,
              7,  6,  8,-13,-21, -6,-31,  0,
              9, -6, 24,  0,-22, -4, -7,-22,
             -7, -9,  9, 11,-11,-16,  9,-27,
             -1,  0,-26,  0,  9,-12, -8,-18,
            -11, -3,-15,  7, 16, -2, -8, -7,
            -14,-15,-14,  3,  9,-12, -6,-11
        });
}

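// A FLOAT32 network whose filter and bias constants are stored as INT8 (scale 1.0, zero point 0),
// so the parser has to dequantize them. Because the input and output tensors are float, the
// output scale in the JSON has no effect and the expected results are the raw accumulations.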
struct Conv2FloatWithInt8WeightsAndBiasesFixture : Conv2DWithBiasesFixture
{
    Conv2FloatWithInt8WeightsAndBiasesFixture()
    : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]",    // inputShape
                              "[ 1, 2, 2, 1 ]",    // outputShape
                              "[ 1, 2, 2, 1 ]",    // filterShape
                              "[ 2,1, 0,6 ]",      // filterData
                              "[ 1 ]",             // biasShape
                              "[ 10 ]",            // biasData
                              "1",                 // stride w and h
                              "NONE",              // activation
                              "1.0",               // filterScale
                              "0",                 // filterZeroPoint
                              "2.0",               // outputScale
                              "0",                 // outputZeroPoint
                              "FLOAT32",           // dataType
                              "INT8",              // filterDataType
                              "INT8")              // biasDataType
    {}
};

TEST_CASE_FIXTURE(Conv2FloatWithInt8WeightsAndBiasesFixture, "ParseConv2FloatWithInt8WeightsAndBiasesFixture")
{
    RunTest<4, armnn::DataType::Float32>(
        0,
        {
            1, 2,
            3, 4,
        },
        {
            (1*2 + 2*1 + 3*0 + 4*6 + 10),
            (2*2 + 0*1 + 4*0 + 0*6 + 10),
            (3*2 + 4*1 + 0*0 + 0*6 + 10),
            (4*2 + 0*1 + 0*0 + 0*6 + 10)
        });
}

}