//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "BatchMatMulTestImpl.hpp"

#include <armnn/backends/IBackendInternal.hpp>
#include <armnn/backends/Workload.hpp>
#include <armnn/backends/WorkloadData.hpp>
#include <armnn/backends/WorkloadFactory.hpp>

#include <armnnTestUtils/WorkloadTestUtils.hpp>
#include <armnnUtils/QuantizeHelper.hpp>
#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnn/Optional.hpp>

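// Generic harness shared by all of the BatchMatMul tests below: it builds a
// BatchMatMulQueueDescriptor from the given descriptor and tensor infos, creates the
// workload through the backend's workload factory, copies the two input buffers in,
// executes the workload and reads the actual output back for comparison against outputExpected.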
template<armnn::DataType ArmnnType, typename T, std::size_t NumDims>
LayerTestResult<T, NumDims> BatchMatMulTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::BatchMatMulDescriptor descriptor,
    const std::vector<T>& inputX,
    const std::vector<T>& inputY,
    const std::vector<T>& outputExpected,
    const armnn::TensorInfo& inputXInfo,
    const armnn::TensorInfo& inputYInfo,
    const armnn::TensorInfo& outputInfo)
{
    std::vector<T> outputActual(outputInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputXHandle = tensorHandleFactory.CreateTensorHandle(inputXInfo);
    std::unique_ptr<armnn::ITensorHandle> inputYHandle = tensorHandleFactory.CreateTensorHandle(inputYInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);

    armnn::BatchMatMulQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;

    AddInputToWorkload(queueDescriptor, workloadInfo, inputXInfo, inputXHandle.get());
    AddInputToWorkload(queueDescriptor, workloadInfo, inputYInfo, inputYHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());

    auto workload = workloadFactory.CreateWorkload(armnn::LayerType::BatchMatMul, queueDescriptor, workloadInfo);

    inputXHandle->Allocate();
    inputYHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputXHandle.get(), inputX.data());
    CopyDataToITensorHandle(inputYHandle.get(), inputY.data());

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(outputActual.data(), outputHandle.get());

    return LayerTestResult<T, NumDims>(outputActual,
                                       outputExpected,
                                       outputHandle->GetShape(),
                                       outputInfo.GetShape());
}

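// Simplest 2D case: a single 2x2 * 2x2 multiply with no transpose/adjoint.
// [1 2] . [5 6] = [1*5+2*7  1*6+2*8] = [19 22]
// [3 4]   [7 8]   [3*5+4*7  3*6+4*8]   [43 50]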
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> BatchMatMul2DSimpleTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(); // Arbitrary layout with no transpose/adjointing

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({2,2}, ArmnnType, qScale, qOffset);

    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        1, 2,
        3, 4
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        5, 6,
        7, 8
    }, qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        19, 22,
        43, 50
    }, qScale, qOffset);

    return BatchMatMulTestImpl<ArmnnType, T, 2>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 2>
BatchMatMul2DSimpleTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
BatchMatMul2DSimpleTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 2>
BatchMatMul2DSimpleTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 2>
BatchMatMul2DSimpleTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
BatchMatMul2DSimpleTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
BatchMatMul2DSimpleTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 3> BatchMatMul3DSimpleTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(); // Arbitrary layout with no transpose/adjointing

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({1,2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({1,2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({1,2,2}, ArmnnType, qScale, qOffset);

    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        1, 2,
        3, 4
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        5, 6,
        7, 8
    }, qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        19, 22,
        43, 50
    }, qScale, qOffset);

    return BatchMatMulTestImpl<ArmnnType, T, 3>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 3>
BatchMatMul3DSimpleTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
BatchMatMul3DSimpleTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 3>
BatchMatMul3DSimpleTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 3>
BatchMatMul3DSimpleTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
BatchMatMul3DSimpleTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
BatchMatMul3DSimpleTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 4> BatchMatMulNCHWSimpleTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(); // Default arbitrary layout is treated the same as NCHW

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({1,1,2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({1,1,2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({1,1,2,2}, ArmnnType, qScale, qOffset);

    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        1, 2,
        3, 4
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        5, 6,
        7, 8
    }, qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        19, 22,
        43, 50
    }, qScale, qOffset);

    return BatchMatMulTestImpl<ArmnnType, T, 4>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
BatchMatMulNCHWSimpleTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
BatchMatMulNCHWSimpleTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
BatchMatMulNCHWSimpleTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
BatchMatMulNCHWSimpleTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
BatchMatMulNCHWSimpleTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
BatchMatMulNCHWSimpleTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

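// NHWC variant of the simple 4D case. The six descriptor arguments are the transpose and
// adjoint flags for X and Y followed by the data layouts of X and Y; here all flags are
// disabled and both operands use NHWC.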
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 4> BatchMatMulNHWCSimpleTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(false,
                                                   false,
                                                   false,
                                                   false,
                                                   armnn::DataLayout::NHWC,
                                                   armnn::DataLayout::NHWC);

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({1,2,2,1}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({1,2,2,1}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({1,2,2,1}, ArmnnType, qScale, qOffset);

    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        1, 2,
        3, 4
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        5, 6,
        7, 8
    }, qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        19, 22,
        43, 50
    }, qScale, qOffset);

    return BatchMatMulTestImpl<ArmnnType, T, 4>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
BatchMatMulNHWCSimpleTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
BatchMatMulNHWCSimpleTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
BatchMatMulNHWCSimpleTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
BatchMatMulNHWCSimpleTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
BatchMatMulNHWCSimpleTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
BatchMatMulNHWCSimpleTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 3> BatchMatMul3DBatchTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(); // Arbitrary layout with no transpose/adjointing

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({2,2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({2,2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({2,2,2}, ArmnnType, qScale, qOffset);

    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        1, 2,
        3, 4,

        9, 10,
        11, 12
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        5, 6,
        7, 8,

        13, 14,
        15, 16
    }, qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        19, 22,
        43, 50,

        267, 286,
        323, 346
    }, qScale, qOffset);

    return BatchMatMulTestImpl<ArmnnType, T, 3>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 3>
BatchMatMul3DBatchTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
BatchMatMul3DBatchTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 3>
BatchMatMul3DBatchTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 3>
BatchMatMul3DBatchTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
BatchMatMul3DBatchTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
BatchMatMul3DBatchTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

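// Batch broadcasting: X has batch size 2 while Y has batch size 1, so the single 2x2 Y
// matrix is reused for both batches of X.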
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 3> BatchMatMul3DBroadcastTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(); // Arbitrary layout with no transpose/adjointing

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({2,2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({1,2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({2,2,2}, ArmnnType, qScale, qOffset);

    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        1, 2,
        3, 4,

        9, 10,
        11, 12
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        13, 14,
        15, 16
    }, qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        43, 46,
        99, 106,

        267, 286,
        323, 346
    }, qScale, qOffset);

    return BatchMatMulTestImpl<ArmnnType, T, 3>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 3>
BatchMatMul3DBroadcastTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
BatchMatMul3DBroadcastTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 3>
BatchMatMul3DBroadcastTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 3>
BatchMatMul3DBroadcastTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
BatchMatMul3DBroadcastTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
BatchMatMul3DBroadcastTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

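// Rank broadcasting: a rank-3 X multiplied by a rank-2 Y; the 2x2 Y is applied to every
// batch of X, giving the same results as the 3D broadcast test above.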
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 3> BatchMatMul3D2DBroadcastTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(); // Arbitrary layout with no transpose/adjointing

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({2,2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({2,2,2}, ArmnnType, qScale, qOffset);

    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        1, 2,
        3, 4,

        9, 10,
        11, 12
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        13, 14,
        15, 16
    }, qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        43, 46,
        99, 106,

        267, 286,
        323, 346
    }, qScale, qOffset);

    return BatchMatMulTestImpl<ArmnnType, T, 3>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 3>
BatchMatMul3D2DBroadcastTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
BatchMatMul3D2DBroadcastTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 3>
BatchMatMul3D2DBroadcastTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 3>
BatchMatMul3D2DBroadcastTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
BatchMatMul3D2DBroadcastTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
BatchMatMul3D2DBroadcastTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

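// Mixed data layouts: X is supplied as a 5D NDHWC tensor and Y as a 4D NHWC tensor,
// exercising the case where the two operands use different layouts and ranks.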
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 5> BatchMatMulNDHWCNHWCTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(false,
                                                   false,
                                                   false,
                                                   false,
                                                   armnn::DataLayout::NDHWC,
                                                   armnn::DataLayout::NHWC);

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({1,1,2,2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({1,2,2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({1,1,2,2,2}, ArmnnType, qScale, qOffset);

    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        1, 20,
        3, 22,

        2, 21,
        4, 23
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        5, 24,
        7, 26,

        6, 25,
        8, 27
    }, qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        23, 1030,
        31, 1114,

        34, 1079,
        46, 1167
    }, qScale, qOffset);

    return BatchMatMulTestImpl<ArmnnType, T, 5>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 5>
BatchMatMulNDHWCNHWCTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 5>
BatchMatMulNDHWCNHWCTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 5>
BatchMatMulNDHWCNHWCTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 5>
BatchMatMulNDHWCNHWCTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 5>
BatchMatMulNDHWCNHWCTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 5>
BatchMatMulNDHWCNHWCTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> BatchMatMul2DTinyTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(); // Arbitrary layout with no transpose/adjointing

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({1,1}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({1,1}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({1,1}, ArmnnType, qScale, qOffset);

    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        3
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        5
    }, qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        15
    }, qScale, qOffset);

    return BatchMatMulTestImpl<ArmnnType, T, 2>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 2>
BatchMatMul2DTinyTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
BatchMatMul2DTinyTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 2>
BatchMatMul2DTinyTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 2>
BatchMatMul2DTinyTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
BatchMatMul2DTinyTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
BatchMatMul2DTinyTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

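// Non-square case: {2,5,3} x {2,3,4} -> {2,5,4}, checking that M, K and N are tracked
// independently rather than assuming square matrices.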
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 3> BatchMatMul3DNonSquareTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(); // Arbitrary layout with no transpose/adjointing

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({2,5,3}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({2,3,4}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({2,5,4}, ArmnnType, qScale, qOffset);

    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        8, 8, 4,
        6, 1, 3,
        8, 8, 3,
        8, 9, 8,
        5, 4, 4,

        1, 8, 5,
        7, 1, 1,
        8, 7, 9,
        3, 2, 7,
        8, 5, 3
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        6, 2, 3, 2,
        6, 2, 2, 8,
        3, 7, 8, 1,

        7, 2, 9, 5,
        2, 3, 1, 3,
        2, 7, 7, 5
    }, qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        108, 60, 72, 84,
        51, 35, 44, 23,
        105, 53, 64, 83,
        126, 90, 106, 96,
        66, 46, 55, 46,

        33, 61, 52, 54,
        53, 24, 71, 43,
        88, 100, 142, 106,
        39, 61, 78, 56,
        72, 52, 98, 70
    }, qScale, qOffset);

    return BatchMatMulTestImpl<ArmnnType, T, 3>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 3>
BatchMatMul3DNonSquareTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
BatchMatMul3DNonSquareTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 3>
BatchMatMul3DNonSquareTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 3>
BatchMatMul3DNonSquareTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
BatchMatMul3DNonSquareTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
BatchMatMul3DNonSquareTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

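// Transpose parameter: the descriptor's first flag transposes X before the multiply, so a
// {2,3} X and a {2,3} Y produce a {3,3} output (X^T * Y).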
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> BatchMatMul2DTranspSimpleTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(true,
                                                   false,
                                                   false,
                                                   false);

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({2,3}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({2,3}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({3,3}, ArmnnType, qScale, qOffset);

    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        1, 2, 3,
        4, 5, 6
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        7, 8, 9,
        10, 11, 12
    }, qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        47, 52, 57,
        64, 71, 78,
        81, 90, 99
    }, qScale, qOffset);

    return BatchMatMulTestImpl<ArmnnType, T, 2>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 2>
BatchMatMul2DTranspSimpleTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
BatchMatMul2DTranspSimpleTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 2>
BatchMatMul2DTranspSimpleTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 2>
BatchMatMul2DTranspSimpleTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
BatchMatMul2DTranspSimpleTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
BatchMatMul2DTranspSimpleTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

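// Adjoint parameter: with the adjoint-X flag set and Y the identity, the result is the
// adjugate (classical adjoint) of X. The QAsymmU8 variant expects different values because
// the negative input element and the negative results saturate at the quantized minimum.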
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> BatchMatMul2DAdjointSimpleTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(false,
                                                   false,
                                                   true,
                                                   false);

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({3,3}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({3,3}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({3,3}, ArmnnType, qScale, qOffset);

    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        3, 1, 1,
        1, 3, -1,
        2, 4, 1
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        1, 0, 0,
        0, 1, 0,
        0, 0, 1
    }, qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        7, 3, -4,
        -3, 1, 4,
        -2, -10, 8
    }, qScale, qOffset);

    switch (ArmnnType)
    {
        case armnn::DataType::QAsymmU8:
            outputExpected = armnnUtils::QuantizedVector<T>({
                3, 3, 0,
                0, 1, 1,
                0, 0, 8
            }, qScale, qOffset);
            break;
        default:
            break;
    }

    return BatchMatMulTestImpl<ArmnnType, T, 2>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 2>
BatchMatMul2DAdjointSimpleTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
BatchMatMul2DAdjointSimpleTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 2>
BatchMatMul2DAdjointSimpleTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 2>
BatchMatMul2DAdjointSimpleTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
BatchMatMul2DAdjointSimpleTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
BatchMatMul2DAdjointSimpleTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

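// Combined parameters: adjoint of X and transpose of Y with NHWC layouts for both operands,
// plus broadcasting between the differing batch/channel dimensions of X and Y. As above, the
// QAsymmU8 expectation differs because values outside the unsigned 8-bit range saturate when
// the inputs and outputs are quantized.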
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 4> BatchMatMulNHWCParamsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(false,
                                                   true,
                                                   true,
                                                   false,
                                                   armnn::DataLayout::NHWC,
                                                   armnn::DataLayout::NHWC);

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({1,4,4,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({2,2,4,1}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({2,4,2,2}, ArmnnType, qScale, qOffset);

    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        1, -3, 1, 4, 4, 9, 1, 2,
        2, 4, 2, 2, 10, 7, 6, -5,
        3, 8, 9, 9, 21, 1, 17, 7,
        5, 11, 11, 8, 29, 3, 23, 6
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        1, 2, 3, 4,
        5, 6, 7, 8,

        9, 10, 11, 12,
        13, 14, 15, 16
    }, qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        28, 625, 140, 585,
        8, 110, -8, 1662,
        -24, 401, -120, 921,
        12, 131, 108, -501,

        252, 545, 364, 505,
        -24, 3214, -40, 4766,
        -216, 1441, -312, 1961,
        204, -1133, 300, -1765
    }, qScale, qOffset);

    switch (ArmnnType)
    {
        case armnn::DataType::QAsymmU8:
            outputExpected = armnnUtils::QuantizedVector<T>({
                28, 80, 140, 80,
                8, 45, 0, 255,
                0, 18, 0, 18,
                12, 0, 108, 0,

                252, 80, 255, 80,
                0, 255, 0, 255,
                0, 18, 0, 18,
                204, 0, 255, 0
            }, qScale, qOffset);
            break;
        default:
            break;
    }

    return BatchMatMulTestImpl<ArmnnType, T, 4>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
BatchMatMulNHWCParamsTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
BatchMatMulNHWCParamsTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
BatchMatMulNHWCParamsTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
BatchMatMulNHWCParamsTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
BatchMatMulNHWCParamsTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
BatchMatMulNHWCParamsTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);