//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "BatchMatMulTestImpl.hpp"

#include <armnn/backends/IBackendInternal.hpp>
#include <armnn/backends/Workload.hpp>
#include <armnn/backends/WorkloadData.hpp>
#include <armnn/backends/WorkloadFactory.hpp>

#include <armnnTestUtils/WorkloadTestUtils.hpp>
#include <armnnUtils/QuantizeHelper.hpp>
#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnn/Optional.hpp>
#include <armnn/BackendHelper.hpp>

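// Shared harness for the BatchMatMul layer tests below. It builds a BatchMatMul
// workload from the supplied descriptor and tensor infos, skips execution when the
// backend reports the configuration as unsupported, and otherwise runs the workload
// and returns the actual output alongside the expected reference values.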
template<armnn::DataType ArmnnType, typename T, std::size_t NumDims>
LayerTestResult<T, NumDims> BatchMatMulTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::BatchMatMulDescriptor descriptor,
    const std::vector<T>& inputX,
    const std::vector<T>& inputY,
    const std::vector<T>& outputExpected,
    const armnn::TensorInfo& inputXInfo,
    const armnn::TensorInfo& inputYInfo,
    const armnn::TensorInfo& outputInfo)
{
    LayerTestResult<T, NumDims> result(outputInfo);
    std::vector<T> outputActual(outputInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputXHandle = tensorHandleFactory.CreateTensorHandle(inputXInfo);
    std::unique_ptr<armnn::ITensorHandle> inputYHandle = tensorHandleFactory.CreateTensorHandle(inputYInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);

    armnn::BatchMatMulQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = std::move(descriptor);
    armnn::WorkloadInfo workloadInfo;

    AddInputToWorkload(queueDescriptor, workloadInfo, inputXInfo, inputXHandle.get());
    AddInputToWorkload(queueDescriptor, workloadInfo, inputYInfo, inputYHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());

    // Don't execute if BatchMatMul is not supported, as an exception will be raised.
    const armnn::BackendId& backend = workloadFactory.GetBackendId();
    std::string reasonIfUnsupported;
    armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId(backend);
    result.m_Supported = handle.IsBatchMatMulSupported(inputXInfo,
                                                       inputYInfo,
                                                       outputInfo,
                                                       queueDescriptor.m_Parameters,
                                                       reasonIfUnsupported);
    if (!result.m_Supported)
    {
        return result;
    }

    auto workload = workloadFactory.CreateWorkload(armnn::LayerType::BatchMatMul, queueDescriptor, workloadInfo);

    inputXHandle->Allocate();
    inputYHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputXHandle.get(), inputX.data());
    CopyDataToITensorHandle(inputYHandle.get(), inputY.data());

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(outputActual.data(), outputHandle.get());

    return LayerTestResult<T, NumDims>(outputActual,
                                       outputExpected,
                                       outputHandle->GetShape(),
                                       outputInfo.GetShape());
}

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> BatchMatMul2DSimpleTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(); // Arbitrary layout with no transpose/adjointing

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({2,2}, ArmnnType, qScale, qOffset);

    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        1, 2,
        3, 4
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        5, 6,
        7, 8
    }, qScale, qOffset);

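    // Expected values are the plain 2x2 matrix product:
    // [1 2; 3 4] * [5 6; 7 8] = [19 22; 43 50].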
    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        19, 22,
        43, 50
    }, qScale, qOffset);

    return BatchMatMulTestImpl<ArmnnType, T, 2>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 2>
BatchMatMul2DSimpleTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
BatchMatMul2DSimpleTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 2>
BatchMatMul2DSimpleTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 2>
BatchMatMul2DSimpleTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
BatchMatMul2DSimpleTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
BatchMatMul2DSimpleTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 3> BatchMatMul3DSimpleTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(); // Arbitrary layout with no transpose/adjointing

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({1,2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({1,2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({1,2,2}, ArmnnType, qScale, qOffset);

    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        1, 2,
        3, 4
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        5, 6,
        7, 8
    }, qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        19, 22,
        43, 50
    }, qScale, qOffset);

    return BatchMatMulTestImpl<ArmnnType, T, 3>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 3>
BatchMatMul3DSimpleTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
BatchMatMul3DSimpleTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 3>
BatchMatMul3DSimpleTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 3>
BatchMatMul3DSimpleTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
BatchMatMul3DSimpleTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
BatchMatMul3DSimpleTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 4> BatchMatMulNCHWSimpleTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(); // Default arbitrary layout is treated the same as NCHW

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({1,1,2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({1,1,2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({1,1,2,2}, ArmnnType, qScale, qOffset);

    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        1, 2,
        3, 4
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        5, 6,
        7, 8
    }, qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        19, 22,
        43, 50
    }, qScale, qOffset);

    return BatchMatMulTestImpl<ArmnnType, T, 4>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
BatchMatMulNCHWSimpleTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
BatchMatMulNCHWSimpleTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
BatchMatMulNCHWSimpleTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
BatchMatMulNCHWSimpleTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
BatchMatMulNCHWSimpleTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
BatchMatMulNCHWSimpleTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 4> BatchMatMulNHWCSimpleTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(false,
                                                   false,
                                                   false,
                                                   false,
                                                   armnn::DataLayout::NHWC,
                                                   armnn::DataLayout::NHWC);

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({1,2,2,1}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({1,2,2,1}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({1,2,2,1}, ArmnnType, qScale, qOffset);

    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        1, 2,
        3, 4
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        5, 6,
        7, 8
    }, qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        19, 22,
        43, 50
    }, qScale, qOffset);

    return BatchMatMulTestImpl<ArmnnType, T, 4>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
BatchMatMulNHWCSimpleTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
BatchMatMulNHWCSimpleTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
BatchMatMulNHWCSimpleTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
BatchMatMulNHWCSimpleTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
BatchMatMulNHWCSimpleTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
BatchMatMulNHWCSimpleTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 3> BatchMatMul3DBatchTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(); // Arbitrary layout with no transpose/adjointing

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({2,2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({2,2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({2,2,2}, ArmnnType, qScale, qOffset);

    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        1, 2,
        3, 4,

        9, 10,
        11, 12
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        5, 6,
        7, 8,

        13, 14,
        15, 16
    }, qScale, qOffset);

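    // Each batch is multiplied independently: batch 0 repeats the 2x2 example above,
    // while batch 1 gives [9 10; 11 12] * [13 14; 15 16] = [267 286; 323 346].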
    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        19, 22,
        43, 50,

        267, 286,
        323, 346
    }, qScale, qOffset);

    return BatchMatMulTestImpl<ArmnnType, T, 3>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 3>
BatchMatMul3DBatchTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
BatchMatMul3DBatchTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 3>
BatchMatMul3DBatchTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 3>
BatchMatMul3DBatchTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
BatchMatMul3DBatchTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
BatchMatMul3DBatchTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 3> BatchMatMul3DBroadcastTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(); // Arbitrary layout with no transpose/adjointing

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({2,2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({1,2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({2,2,2}, ArmnnType, qScale, qOffset);

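    // inputY holds a single batch that is broadcast against both batches of inputX.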
    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        1, 2,
        3, 4,

        9, 10,
        11, 12
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        13, 14,
        15, 16
    }, qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        43, 46,
        99, 106,

        267, 286,
        323, 346
    }, qScale, qOffset);

    return BatchMatMulTestImpl<ArmnnType, T, 3>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 3>
BatchMatMul3DBroadcastTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
BatchMatMul3DBroadcastTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 3>
BatchMatMul3DBroadcastTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 3>
BatchMatMul3DBroadcastTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
BatchMatMul3DBroadcastTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
BatchMatMul3DBroadcastTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 3> BatchMatMul3D2DBroadcastTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(); // Arbitrary layout with no transpose/adjointing

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({2,2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({2,2,2}, ArmnnType, qScale, qOffset);

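    // The rank-2 inputY is broadcast against each batch of the rank-3 inputX.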
    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        1, 2,
        3, 4,

        9, 10,
        11, 12
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        13, 14,
        15, 16
    }, qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        43, 46,
        99, 106,

        267, 286,
        323, 346
    }, qScale, qOffset);

    return BatchMatMulTestImpl<ArmnnType, T, 3>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 3>
BatchMatMul3D2DBroadcastTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
BatchMatMul3D2DBroadcastTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 3>
BatchMatMul3D2DBroadcastTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 3>
BatchMatMul3D2DBroadcastTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
BatchMatMul3D2DBroadcastTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
BatchMatMul3D2DBroadcastTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 5> BatchMatMulNDHWCNHWCTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(false,
                                                   false,
                                                   false,
                                                   false,
                                                   armnn::DataLayout::NDHWC,
                                                   armnn::DataLayout::NHWC);

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({1,1,2,2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({1,2,2,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({1,1,2,2,2}, ArmnnType, qScale, qOffset);

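    // With NDHWC for X and NHWC for Y, H and W form the matrix dimensions and each
    // channel is multiplied independently:
    //   channel 0: [1 3; 2 4] * [5 7; 6 8]         = [23 31; 34 46]
    //   channel 1: [20 22; 21 23] * [24 26; 25 27] = [1030 1114; 1079 1167]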
    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        1, 20,
        3, 22,

        2, 21,
        4, 23
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        5, 24,
        7, 26,

        6, 25,
        8, 27
    }, qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        23, 1030,
        31, 1114,

        34, 1079,
        46, 1167
    }, qScale, qOffset);

    return BatchMatMulTestImpl<ArmnnType, T, 5>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 5>
BatchMatMulNDHWCNHWCTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 5>
BatchMatMulNDHWCNHWCTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 5>
BatchMatMulNDHWCNHWCTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 5>
BatchMatMulNDHWCNHWCTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 5>
BatchMatMulNDHWCNHWCTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 5>
BatchMatMulNDHWCNHWCTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> BatchMatMul2DTinyTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(); // Arbitrary layout with no transpose/adjointing

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({1,1}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({1,1}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({1,1}, ArmnnType, qScale, qOffset);

    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        3
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        5
    }, qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        15
    }, qScale, qOffset);

    return BatchMatMulTestImpl<ArmnnType, T, 2>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 2>
BatchMatMul2DTinyTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
BatchMatMul2DTinyTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 2>
BatchMatMul2DTinyTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 2>
BatchMatMul2DTinyTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
BatchMatMul2DTinyTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
BatchMatMul2DTinyTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 3> BatchMatMul3DNonSquareTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(); // Arbitrary layout with no transpose/adjointing

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({2,5,3}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({2,3,4}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({2,5,4}, ArmnnType, qScale, qOffset);

    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        8, 8, 4,
        6, 1, 3,
        8, 8, 3,
        8, 9, 8,
        5, 4, 4,

        1, 8, 5,
        7, 1, 1,
        8, 7, 9,
        3, 2, 7,
        8, 5, 3
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        6, 2, 3, 2,
        6, 2, 2, 8,
        3, 7, 8, 1,

        7, 2, 9, 5,
        2, 3, 1, 3,
        2, 7, 7, 5
    }, qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        108, 60, 72, 84,
        51, 35, 44, 23,
        105, 53, 64, 83,
        126, 90, 106, 96,
        66, 46, 55, 46,

        33, 61, 52, 54,
        53, 24, 71, 43,
        88, 100, 142, 106,
        39, 61, 78, 56,
        72, 52, 98, 70
    }, qScale, qOffset);

    return BatchMatMulTestImpl<ArmnnType, T, 3>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 3>
BatchMatMul3DNonSquareTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
BatchMatMul3DNonSquareTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 3>
BatchMatMul3DNonSquareTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 3>
BatchMatMul3DNonSquareTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
BatchMatMul3DNonSquareTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
BatchMatMul3DNonSquareTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> BatchMatMul2DTranspSimpleTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(true,
                                                   false,
                                                   false,
                                                   false);

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({2,3}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({2,3}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({3,3}, ArmnnType, qScale, qOffset);

    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        1, 2, 3,
        4, 5, 6
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        7, 8, 9,
        10, 11, 12
    }, qScale, qOffset);

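    // With transposeX set, the result is X^T (3x2) * Y (2x3); for example the first
    // entry is 1*7 + 4*10 = 47.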
    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        47, 52, 57,
        64, 71, 78,
        81, 90, 99
    }, qScale, qOffset);

    return BatchMatMulTestImpl<ArmnnType, T, 2>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 2>
BatchMatMul2DTranspSimpleTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
BatchMatMul2DTranspSimpleTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 2>
BatchMatMul2DTranspSimpleTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 2>
BatchMatMul2DTranspSimpleTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
BatchMatMul2DTranspSimpleTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
BatchMatMul2DTranspSimpleTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> BatchMatMul2DAdjointSimpleTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(false,
                                                   false,
                                                   true,
                                                   false);

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({3,3}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({3,3}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({3,3}, ArmnnType, qScale, qOffset);

    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        3, 1, 1,
        1, 3, -1,
        2, 4, 1
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        1, 0, 0,
        0, 1, 0,
        0, 0, 1
    }, qScale, qOffset);

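    // Y is the identity, so the expected output is the adjoint (adjugate) of X:
    // adj(X) = [7 3 -4; -3 1 4; -2 -10 8].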
    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        7, 3, -4,
        -3, 1, 4,
        -2, -10, 8
    }, qScale, qOffset);

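    // QAsymmU8 (unsigned, zero offset) cannot represent the negative entries of
    // adj(X); the alternative expected output below is assumed to reflect saturation
    // of intermediate values in the unsigned quantized domain.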
    switch (ArmnnType)
    {
        case armnn::DataType::QAsymmU8:
            outputExpected = armnnUtils::QuantizedVector<T>({
                3, 3, 0,
                0, 1, 1,
                0, 0, 8
            }, qScale, qOffset);
            break;
        default:
            break;
    }

    return BatchMatMulTestImpl<ArmnnType, T, 2>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 2>
BatchMatMul2DAdjointSimpleTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
BatchMatMul2DAdjointSimpleTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 2>
BatchMatMul2DAdjointSimpleTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 2>
BatchMatMul2DAdjointSimpleTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
BatchMatMul2DAdjointSimpleTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
BatchMatMul2DAdjointSimpleTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 4> BatchMatMulNHWCParamsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    auto descriptor = armnn::BatchMatMulDescriptor(false,
                                                   true,
                                                   true,
                                                   false,
                                                   armnn::DataLayout::NHWC,
                                                   armnn::DataLayout::NHWC);

    float qScale = 1.0f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputXInfo({1,4,4,2}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo inputYInfo({2,2,4,1}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputInfo({2,4,2,2}, ArmnnType, qScale, qOffset);

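    // adjointX and transposeY are set with NHWC data layouts; the batch dimension
    // (1 vs. 2) and the channel dimension (2 vs. 1) broadcast between the inputs.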
    std::vector<T> inputX = armnnUtils::QuantizedVector<T>({
        1, -3, 1, 4, 4, 9, 1, 2,
        2, 4, 2, 2, 10, 7, 6, -5,
        3, 8, 9, 9, 21, 1, 17, 7,
        5, 11, 11, 8, 29, 3, 23, 6
    }, qScale, qOffset);

    std::vector<T> inputY = armnnUtils::QuantizedVector<T>({
        1, 2, 3, 4,
        5, 6, 7, 8,

        9, 10, 11, 12,
        13, 14, 15, 16
    }, qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>({
        28, 625, 140, 585,
        8, 110, -8, 1662,
        -24, 401, -120, 921,
        12, 131, 108, -501,

        252, 545, 364, 505,
        -24, 3214, -40, 4766,
        -216, 1441, -312, 1961,
        204, -1133, 300, -1765
    }, qScale, qOffset);

    switch (ArmnnType)
    {
        case armnn::DataType::QAsymmU8:
            outputExpected = armnnUtils::QuantizedVector<T>({
                28, 80, 140, 80,
                8, 45, 0, 255,
                0, 18, 0, 18,
                12, 0, 108, 0,

                252, 80, 255, 80,
                0, 255, 0, 255,
                0, 18, 0, 18,
                204, 0, 255, 0
            }, qScale, qOffset);
            break;
        default:
            break;
    }

    return BatchMatMulTestImpl<ArmnnType, T, 4>(workloadFactory,
                                                memoryManager,
                                                tensorHandleFactory,
                                                descriptor,
                                                inputX,
                                                inputY,
                                                outputExpected,
                                                inputXInfo,
                                                inputYInfo,
                                                outputInfo);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
BatchMatMulNHWCParamsTest<armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
BatchMatMulNHWCParamsTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
BatchMatMulNHWCParamsTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
BatchMatMulNHWCParamsTest<armnn::DataType::QAsymmS8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
BatchMatMulNHWCParamsTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
BatchMatMulNHWCParamsTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);