//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "BatchMatMulTestHelper.hpp"

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>

#include <doctest/doctest.h>

namespace armnnDelegate
{
16
17 void BatchMatMul2DFp32SimpleTest(std::vector<armnn::BackendId>& backends)
18 {
19 // Set input data
20 std::vector<int32_t> LHSInputShape { 2, 2 };
21 std::vector<int32_t> RHSInputShape { 2, 2 };
22 std::vector<int32_t> outputShape { 2, 2 };
23
24 std::vector<float> LHSInputValues = { 1, 2,
25 3, 4 };
26
27 std::vector<float> RHSInputValues = { 5, 6,
28 7, 8 };
29
30 std::vector<float> expectedOutputValues = { 19, 22,
31 43, 50 };
32
33 BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
34 ::tflite::TensorType_FLOAT32,
35 backends,
36 LHSInputShape,
37 RHSInputShape,
38 outputShape,
39 LHSInputValues,
40 RHSInputValues,
41 expectedOutputValues,
42 false,
43 false);
44 }
45 void BatchMatMul2DInt8SimpleTest(std::vector<armnn::BackendId>& backends)
46 {
47 // Set input data
48 std::vector<int32_t> LHSInputShape { 2, 2 };
49 std::vector<int32_t> RHSInputShape { 2, 2 };
50 std::vector<int32_t> outputShape { 2, 2 };
51
52 std::vector<int8_t> LHSInputValues = { 1, 2,
53 3, 4 };
54
55 std::vector<int8_t> RHSInputValues = { 5, 6,
56 7, 8 };
57
58 std::vector<int8_t> expectedOutputValues = { 19, 22,
59 43, 50 };
60
61 BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
62 ::tflite::TensorType_INT8,
63 backends,
64 LHSInputShape,
65 RHSInputShape,
66 outputShape,
67 LHSInputValues,
68 RHSInputValues,
69 expectedOutputValues,
70 false,
71 false);
72 }
73
74 void BatchMatMul3DFp32SimpleTest(std::vector<armnn::BackendId>& backends)
75 {
76 // Set input data
77 std::vector<int32_t> LHSInputShape { 1,2,2 };
78 std::vector<int32_t> RHSInputShape { 1,2,2 };
79 std::vector<int32_t> outputShape { 1,2,2 };
80
81 std::vector<float> LHSInputValues = { 1, 2,
82 3, 4 };
83
84 std::vector<float> RHSInputValues = { 5, 6,
85 7, 8 };
86
87 std::vector<float> expectedOutputValues = { 19, 22,
88 43, 50 };
89
90 BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
91 ::tflite::TensorType_FLOAT32,
92 backends,
93 LHSInputShape,
94 RHSInputShape,
95 outputShape,
96 LHSInputValues,
97 RHSInputValues,
98 expectedOutputValues,
99 false,
100 false);
101 }
102
103 void BatchMatMul3DInt8SimpleTest(std::vector<armnn::BackendId>& backends)
104 {
105 // Set input data
106 std::vector<int32_t> LHSInputShape { 1,2,2 };
107 std::vector<int32_t> RHSInputShape { 1,2,2 };
108 std::vector<int32_t> outputShape { 1,2,2 };
109
110 std::vector<int8_t> LHSInputValues = { 1, 2,
111 3, 4 };
112
113 std::vector<int8_t> RHSInputValues = { 5, 6,
114 7, 8 };
115
116 std::vector<int8_t> expectedOutputValues = { 19, 22,
117 43, 50 };
118
119 BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
120 ::tflite::TensorType_INT8,
121 backends,
122 LHSInputShape,
123 RHSInputShape,
124 outputShape,
125 LHSInputValues,
126 RHSInputValues,
127 expectedOutputValues,
128 false,
129 false);
130 }
131
132 void BatchMatMul4DFp32SimpleTest(std::vector<armnn::BackendId>& backends)
133 {
134 // Set input data
135 std::vector<int32_t> LHSInputShape { 1,1,2,2 };
136 std::vector<int32_t> RHSInputShape { 1,1,2,2 };
137 std::vector<int32_t> outputShape { 1,1,2,2 };
138
139 std::vector<float> LHSInputValues = { 1, 2,
140 3, 4 };
141
142 std::vector<float> RHSInputValues = { 5, 6,
143 7, 8 };
144
145 std::vector<float> expectedOutputValues = { 19, 22,
146 43, 50 };
147
148 BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
149 ::tflite::TensorType_FLOAT32,
150 backends,
151 LHSInputShape,
152 RHSInputShape,
153 outputShape,
154 LHSInputValues,
155 RHSInputValues,
156 expectedOutputValues,
157 false,
158 false);
159 }
160
161 void BatchMatMul4DInt8SimpleTest(std::vector<armnn::BackendId>& backends)
162 {
163 // Set input data
164 std::vector<int32_t> LHSInputShape { 1,1,2,2};
165 std::vector<int32_t> RHSInputShape { 1,1,2,2 };
166 std::vector<int32_t> outputShape { 1,1,2,2 };
167
168 std::vector<int8_t> LHSInputValues = { 1, 2,
169 3, 4 };
170
171 std::vector<int8_t> RHSInputValues = { 5, 6,
172 7, 8 };
173
174 std::vector<int8_t> expectedOutputValues = { 19, 22,
175 43, 50 };
176
177 BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
178 ::tflite::TensorType_INT8,
179 backends,
180 LHSInputShape,
181 RHSInputShape,
182 outputShape,
183 LHSInputValues,
184 RHSInputValues,
185 expectedOutputValues,
186 false,
187 false);
188 }
189
190 void BatchMatMul3DFp32BatchTest(std::vector<armnn::BackendId>& backends)
191 {
192 // Set input data
193 std::vector<int32_t> LHSInputShape { 2,2,2 };
194 std::vector<int32_t> RHSInputShape { 2,2,2 };
195 std::vector<int32_t> outputShape { 2,2,2 };
196
197 std::vector<float> LHSInputValues = { 1, 2,
198 3, 4,
199
200 9, 10,
201 11, 12 };
202
203 std::vector<float> RHSInputValues = { 5, 6,
204 7, 8,
205
206 13, 14,
207 15, 16 };
208
209 std::vector<float> expectedOutputValues = { 19, 22,
210 43, 50,
211
212 267, 286,
213 323, 346 };
214
215 BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
216 ::tflite::TensorType_FLOAT32,
217 backends,
218 LHSInputShape,
219 RHSInputShape,
220 outputShape,
221 LHSInputValues,
222 RHSInputValues,
223 expectedOutputValues,
224 false,
225 false);
226 }
227
228 void BatchMatMul3DInt8BatchTest(std::vector<armnn::BackendId>& backends)
229 {
230 // Set input data
231 std::vector<int32_t> LHSInputShape { 2,2,2 };
232 std::vector<int32_t> RHSInputShape { 2,2,2 };
233 std::vector<int32_t> outputShape { 2,2,2 };
234
235 std::vector<int8_t> LHSInputValues = { 1, 2,
236 3, 4,
237
238 9, 10,
239 11, 12 };
240
241 std::vector<int8_t> RHSInputValues = { 5, 6,
242 7, 8,
243
244 1, 2,
245 3, 4 };
246
247 std::vector<int8_t> expectedOutputValues = { 19, 22,
248 43, 50,
249
250 39, 58,
251 47, 70 };
252
253 BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
254 ::tflite::TensorType_INT8,
255 backends,
256 LHSInputShape,
257 RHSInputShape,
258 outputShape,
259 LHSInputValues,
260 RHSInputValues,
261 expectedOutputValues,
262 false,
263 false);
264 }
265
266 void BatchMatMul3DFp32BroadcastTest(std::vector<armnn::BackendId>& backends)
267 {
268 // Set input data
269 std::vector<int32_t> LHSInputShape { 2,2,2 };
Teresa Charlin94916a52022-10-19 08:48:07 +0100270 std::vector<int32_t> RHSInputShape { 2,2 };
Ryan OShea49ed0df2022-09-21 16:09:41 +0100271 std::vector<int32_t> outputShape { 2,2,2 };
272
273 std::vector<float> LHSInputValues = { 1, 2,
274 3, 4,
275
276 9, 10,
277 11, 12 };
278
279 std::vector<float> RHSInputValues = { 13, 14,
280 15, 16 };
281
282 std::vector<float> expectedOutputValues = { 43, 46,
283 99, 106,
284
285 267, 286,
286 323, 346 };
287
288 BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
289 ::tflite::TensorType_FLOAT32,
290 backends,
291 LHSInputShape,
292 RHSInputShape,
293 outputShape,
294 LHSInputValues,
295 RHSInputValues,
296 expectedOutputValues,
297 false,
298 false);
299 }
300
301 void BatchMatMul3DInt8BroadcastTest(std::vector<armnn::BackendId>& backends)
302 {
303 // Set input data
304 std::vector<int32_t> LHSInputShape { 2,2,2 };
Teresa Charlin97a3aef2023-01-10 10:32:51 +0000305 std::vector<int32_t> RHSInputShape { 2,2 };
Ryan OShea49ed0df2022-09-21 16:09:41 +0100306 std::vector<int32_t> outputShape { 2,2,2 };
307
308 std::vector<int8_t> LHSInputValues = { 1, 2,
309 3, 4,
310
311 9, 10,
312 11, 12 };
313
314 std::vector<int8_t> RHSInputValues = { 1, 2,
315 3, 4 };
316
317 std::vector<int8_t> expectedOutputValues = { 7, 10,
318 15, 22,
319
320 39, 58,
321 47, 70 };
322
323 BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
324 ::tflite::TensorType_INT8,
325 backends,
326 LHSInputShape,
327 RHSInputShape,
328 outputShape,
329 LHSInputValues,
330 RHSInputValues,
331 expectedOutputValues,
332 false,
333 false);
334 }
335
336 void BatchMatMul3D2DFp32BroadcastTest(std::vector<armnn::BackendId>& backends)
337 {
338 // Set input data
339 std::vector<int32_t> LHSInputShape { 2,2,2 };
340 std::vector<int32_t> RHSInputShape { 2,2 };
341 std::vector<int32_t> outputShape { 2,2,2 };
342
343 std::vector<float> LHSInputValues = { 1, 2,
344 3, 4,
345
346 9, 10,
347 11, 12 };
348
349 std::vector<float> RHSInputValues = { 13, 14,
350 15, 16 };
351
352 std::vector<float> expectedOutputValues = { 43, 46,
353 99, 106,
354
355 267, 286,
356 323, 346 };
357
358 BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
359 ::tflite::TensorType_FLOAT32,
360 backends,
361 LHSInputShape,
362 RHSInputShape,
363 outputShape,
364 LHSInputValues,
365 RHSInputValues,
366 expectedOutputValues,
367 false,
368 false);
369 }
370
371 void BatchMatMul3D2DInt8BroadcastTest(std::vector<armnn::BackendId>& backends)
372 {
373 // Set input data
374 std::vector<int32_t> LHSInputShape { 2,2,2 };
375 std::vector<int32_t> RHSInputShape { 2,2 };
376 std::vector<int32_t> outputShape { 2,2,2 };
377
378 std::vector<int8_t> LHSInputValues = { 1, 2,
379 3, 4,
380
381 9, 10,
382 11, 12 };
383
384 std::vector<int8_t> RHSInputValues = { 1, 2,
385 3, 4 };
386
387 std::vector<int8_t> expectedOutputValues = { 7, 10,
388 15, 22,
389
390 39, 58,
391 47, 70 };
392
393 BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
394 ::tflite::TensorType_INT8,
395 backends,
396 LHSInputShape,
397 RHSInputShape,
398 outputShape,
399 LHSInputValues,
400 RHSInputValues,
401 expectedOutputValues,
402 false,
403 false);
404 }
405
406 void BatchMatMul2DFp32TinyTest(std::vector<armnn::BackendId>& backends)
407 {
408 // Set input data
409 std::vector<int32_t> LHSInputShape { 1,1 };
410 std::vector<int32_t> RHSInputShape { 1,1 };
411 std::vector<int32_t> outputShape { 1,1 };
412
413 std::vector<float> LHSInputValues = { 3 };
414
415 std::vector<float> RHSInputValues = { 5 };
416
417 std::vector<float> expectedOutputValues = { 15 };
418
419 BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
420 ::tflite::TensorType_FLOAT32,
421 backends,
422 LHSInputShape,
423 RHSInputShape,
424 outputShape,
425 LHSInputValues,
426 RHSInputValues,
427 expectedOutputValues,
428 false,
429 false);
430 }
431 void BatchMatMul2DInt8TinyTest(std::vector<armnn::BackendId>& backends)
432 {
433 // Set input data
434 std::vector<int32_t> LHSInputShape { 1,1 };
435 std::vector<int32_t> RHSInputShape { 1,1 };
436 std::vector<int32_t> outputShape { 1,1 };
437
438 std::vector<int8_t> LHSInputValues = { 3 };
439
440 std::vector<int8_t> RHSInputValues = { 5 };
441
442 std::vector<int8_t> expectedOutputValues = { 15 };
443
444 BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
445 ::tflite::TensorType_INT8,
446 backends,
447 LHSInputShape,
448 RHSInputShape,
449 outputShape,
450 LHSInputValues,
451 RHSInputValues,
452 expectedOutputValues,
453 false,
454 false);
455 }
456
457 void BatchMatMulNonSquareFp32Test(std::vector<armnn::BackendId>& backends)
458 {
459 // Set input data
460 std::vector<int32_t> LHSInputShape { 2,5,3 };
461 std::vector<int32_t> RHSInputShape { 2,3,4 };
462 std::vector<int32_t> outputShape { 2,5,4 };
463
464 std::vector<float> LHSInputValues = { 8, 8, 4,
465 6, 1, 3,
466 8, 8, 3,
467 8, 9, 8,
468 5, 4, 4,
469
470 1, 8, 5,
471 7, 1, 1,
472 8, 7, 9,
473 3, 2, 7,
474 8, 5, 3 };
475
476 std::vector<float> RHSInputValues = { 6, 2, 3, 2,
477 6, 2, 2, 8,
478 3, 7, 8, 1,
479
480 7, 2, 9, 5,
481 2, 3, 1, 3,
482 2, 7, 7, 5 };
483
484 std::vector<float> expectedOutputValues = { 108, 60, 72, 84,
485 51, 35, 44, 23,
486 105, 53, 64, 83,
487 126, 90, 106, 96,
488 66, 46, 55, 46,
489
490 33, 61, 52, 54,
491 53, 24, 71, 43,
492 88, 100, 142, 106,
493 39, 61, 78, 56,
494 72, 52, 98, 70 };
495
496 BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
497 ::tflite::TensorType_FLOAT32,
498 backends,
499 LHSInputShape,
500 RHSInputShape,
501 outputShape,
502 LHSInputValues,
503 RHSInputValues,
504 expectedOutputValues,
505 false,
506 false);
507 }
508
509 void BatchMatMulNonSquareInt8Test(std::vector<armnn::BackendId>& backends)
510 {
511 // Set input data
512 std::vector<int32_t> LHSInputShape { 2,5,3 };
513 std::vector<int32_t> RHSInputShape { 2,3,4 };
514 std::vector<int32_t> outputShape { 2,5,4 };
515
516 std::vector<int8_t> LHSInputValues = { 8, 8, 4,
517 6, 1, 3,
518 8, 8, 3,
519 8, 9, 8,
520 5, 4, 4,
521
522 1, 8, 5,
523 7, 1, 1,
524 8, 7, 9,
525 3, 2, 7,
526 8, 5, 3 };
527
528 std::vector<int8_t> RHSInputValues = { 6, 2, 3, 2,
529 6, 2, 2, 8,
530 3, 7, 8, 1,
531
532 7, 2, 3, 5,
533 2, 3, 1, 3,
534 2, 7, 7, 5 };
535
536 std::vector<int8_t> expectedOutputValues = { 108, 60, 72, 84,
537 51, 35, 44, 23,
538 105, 53, 64, 83,
539 126, 90, 106, 96,
540 66, 46, 55, 46,
541
542 33, 61, 46, 54,
543 53, 24, 29, 43,
544 88, 100, 94, 106,
545 39, 61, 60, 56,
546 72, 52, 50, 70 };
547
548 BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
549 ::tflite::TensorType_INT8,
550 backends,
551 LHSInputShape,
552 RHSInputShape,
553 outputShape,
554 LHSInputValues,
555 RHSInputValues,
556 expectedOutputValues,
557 false,
558 false);
559 }
560
561 void BatchMatMul2DFp32SimpleAdjointTest(std::vector<armnn::BackendId>& backends)
562 {
563 // Set input data
564 std::vector<int32_t> LHSInputShape { 3,3 };
565 std::vector<int32_t> RHSInputShape { 3,3 };
566 std::vector<int32_t> outputShape { 3,3 };
567
568 std::vector<float> LHSInputValues = { 3, 1, 1,
569 1, 3, -1,
570 2, 4, 1 };
571
572 std::vector<float> RHSInputValues = { 1, 0, 0,
573 0, 1, 0,
574 0, 0, 1 };
575
576 std::vector<float> expectedOutputValues = { 3, 1, 2,
577 1, 3, 4,
578 1, -1, 1 };
579
580 BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
581 ::tflite::TensorType_FLOAT32,
582 backends,
583 LHSInputShape,
584 RHSInputShape,
585 outputShape,
586 LHSInputValues,
587 RHSInputValues,
588 expectedOutputValues,
589 true,
590 false);
591 }
592
593 void BatchMatMul2DInt8SimpleAdjointTest(std::vector<armnn::BackendId>& backends)
594 {
595 // Set input data
596 std::vector<int32_t> LHSInputShape { 3,3 };
597 std::vector<int32_t> RHSInputShape { 3,3 };
598 std::vector<int32_t> outputShape { 3,3 };
599
600 std::vector<int8_t> LHSInputValues = { 3, 1, 1,
601 1, 3, -1,
602 2, 4, 1 };
603
604 std::vector<int8_t> RHSInputValues = { 1, 0, 0,
605 0, 1, 0,
606 0, 0, 1 };
607
608 std::vector<int8_t> expectedOutputValues = { 3, 1, 2,
609 1, 3, 4,
610 1, -1, 1 };
611
612 BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
613 ::tflite::TensorType_INT8,
614 backends,
615 LHSInputShape,
616 RHSInputShape,
617 outputShape,
618 LHSInputValues,
619 RHSInputValues,
620 expectedOutputValues,
621 true,
622 false);
623 }
624
625 TEST_SUITE("BATCH_MATMUL_CpuRefTests")
626 {
627 TEST_CASE("BATCH_MATMUL_Fp32_CpuRefTests")
628 {
629 std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
630 BatchMatMul2DFp32SimpleTest (backends);
631 BatchMatMul3DFp32SimpleTest (backends);
632 BatchMatMul4DFp32SimpleTest (backends);
633 BatchMatMul3DFp32BatchTest (backends);
634 BatchMatMul3DFp32BroadcastTest (backends);
635 BatchMatMul3D2DFp32BroadcastTest (backends);
636 BatchMatMul2DFp32TinyTest (backends);
637 BatchMatMulNonSquareFp32Test (backends);
638 BatchMatMul2DFp32SimpleAdjointTest(backends);
639 }
640
641 TEST_CASE("BATCH_MATMUL_Int8_CpuRefTests")
642 {
643 std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
644 BatchMatMul2DInt8SimpleTest (backends);
645 BatchMatMul3DInt8SimpleTest (backends);
646 BatchMatMul4DInt8SimpleTest (backends);
647 BatchMatMul3DInt8BatchTest (backends);
648 BatchMatMul3DInt8BroadcastTest (backends);
649 BatchMatMul3D2DInt8BroadcastTest (backends);
650 BatchMatMul2DInt8TinyTest (backends);
651 BatchMatMulNonSquareInt8Test (backends);
652 BatchMatMul2DInt8SimpleAdjointTest(backends);
653 }
654 }
655
Teresa Charlin0f86ecf2022-10-13 15:47:08 +0100656 TEST_SUITE("BATCH_MATMUL_CpuAccTests")
657 {
658 TEST_CASE("BATCH_MATMUL_Fp32_CpuAccTests")
659 {
660 std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
661 BatchMatMul2DFp32SimpleTest (backends);
662 BatchMatMul3DFp32SimpleTest (backends);
663 BatchMatMul4DFp32SimpleTest (backends);
664 BatchMatMul3DFp32BatchTest (backends);
Teresa Charlin0f86ecf2022-10-13 15:47:08 +0100665 BatchMatMul2DFp32TinyTest (backends);
666 BatchMatMulNonSquareFp32Test (backends);
667 BatchMatMul2DFp32SimpleAdjointTest(backends);
668 }
Teresa Charlin1fe6c812022-11-01 15:59:50 +0000669
670 TEST_CASE("BATCH_MATMUL_Int8_CpuAccTests")
671 {
672 std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
673 BatchMatMul2DInt8SimpleTest (backends);
674 BatchMatMul3DInt8SimpleTest (backends);
675 BatchMatMul4DInt8SimpleTest (backends);
676 BatchMatMul3DInt8BatchTest (backends);
677 BatchMatMul2DInt8TinyTest (backends);
678 BatchMatMulNonSquareInt8Test (backends);
679 BatchMatMul2DInt8SimpleAdjointTest(backends);
680 }
Teresa Charlin0f86ecf2022-10-13 15:47:08 +0100681 }
Teresa Charlin97a3aef2023-01-10 10:32:51 +0000682
Teresa Charlin94916a52022-10-19 08:48:07 +0100683 TEST_SUITE("BATCH_MATMUL_GpuAccTests")
684 {
685 TEST_CASE("BATCH_MATMUL_Fp32_GpuAccTests")
686 {
687 std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
688 BatchMatMul2DFp32SimpleTest (backends);
689 BatchMatMul3DFp32SimpleTest (backends);
Mike Kelly0e3fe102023-01-23 19:32:06 +0000690 BatchMatMul4DFp32SimpleTest (backends);
Teresa Charlin94916a52022-10-19 08:48:07 +0100691 BatchMatMul3DFp32BatchTest (backends);
Teresa Charlin94916a52022-10-19 08:48:07 +0100692 BatchMatMul2DFp32TinyTest (backends);
693 BatchMatMulNonSquareFp32Test (backends);
694 BatchMatMul2DFp32SimpleAdjointTest(backends);
695 }
Teresa Charlin97a3aef2023-01-10 10:32:51 +0000696
697 TEST_CASE("BATCH_MATMUL_Int8_GpuAccTests")
698 {
699 std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
700 BatchMatMul2DInt8SimpleTest (backends);
701 BatchMatMul3DInt8SimpleTest (backends);
702 BatchMatMul3DInt8BatchTest (backends);
703 BatchMatMul2DInt8TinyTest (backends);
704 BatchMatMulNonSquareInt8Test (backends);
705 BatchMatMul2DInt8SimpleAdjointTest(backends);
706 }
Teresa Charlin94916a52022-10-19 08:48:07 +0100707 }
} // namespace armnnDelegate