blob: 45cf48b40eda4722b13667e2fb38a2bdcf8f06a8 [file] [log] [blame]
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001//
2// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "Conv3dTestImpl.hpp"
7
8#include <QuantizeHelper.hpp>
9
10#include <armnnUtils/DataLayoutIndexed.hpp>
11
Colm Donelan0c479742021-12-10 12:43:54 +000012#include <armnn/backends/TensorHandle.hpp>
Matthew Sloyanb63a3112021-09-08 13:05:51 +010013
Sadik Armagana097d2a2021-11-24 15:47:28 +000014#include <armnnTestUtils/DataLayoutUtils.hpp>
15#include <armnnTestUtils/TensorCopyUtils.hpp>
Colm Donelan0c479742021-12-10 12:43:54 +000016#include <armnnTestUtils/WorkloadTestUtils.hpp>
Matthew Sloyanb63a3112021-09-08 13:05:51 +010017
Sadik Armagana097d2a2021-11-24 15:47:28 +000018#include <TensorHelpers.hpp>
Matthew Sloyanb63a3112021-09-08 13:05:51 +010019
20using namespace armnnUtils;
21
22//
23// Helper templates
24//
25
26// Helper template that returns a quantized bias depending on the number of output channels.
27template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
28std::vector<T> GetBiasData(bool biasEnabled, float qScale, armnn::TensorInfo outputInfo, armnn::DataLayout layout)
29{
30 if(!biasEnabled)
31 {
32 return std::vector<T>();
33 }
34 else
35 {
36 const armnnUtils::DataLayoutIndexed dataLayoutIndexed(layout);
37 const unsigned int outputChannels = outputInfo.GetShape()[dataLayoutIndexed.GetChannelsIndex()];
38
39 switch (outputChannels)
40 {
41 case 1:
42 {
43 return QuantizedVector<T>({2}, qScale, 0);
44 }
45 case 2:
46 default:
47 {
48 return QuantizedVector<T>({0, 2}, qScale, 0);
49 }
50 }
51 }
52}
53
54// Modifies a std::vector in-place using a specified bias.
55template<typename T, typename B>
56void ApplyBiasToData(std::vector<T>& v, const std::vector<B>& bias,
57 float vScale, int32_t vOffset,
58 float bScale, int32_t bOffset)
59{
60 ARMNN_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
61 "Invalid type and parameter combination.");
62 ARMNN_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
63 "Invalid type and parameter combination.");
64
65 for (uint32_t i = 0; i < bias.size(); ++i)
66 {
Rob Hugheseb70c912021-10-07 08:53:58 +010067 for (size_t j = i; j < v.size(); j+=bias.size())
Matthew Sloyanb63a3112021-09-08 13:05:51 +010068 {
69 // Note we need to dequantize and re-quantize the image value and the bias.
70 float dBias = SelectiveDequantize(bias[i], bScale, bOffset);
71
72 T& outRef = v[j];
73 float dOutput = SelectiveDequantize(outRef, vScale, vOffset);
74 outRef = SelectiveQuantize<T>(dOutput + dBias, vScale, vOffset);
75 }
76 }
77}
78
79// Set the quantization scale and offset values for data types.
80template<armnn::DataType ArmnnType>
81void SetScaleOffset(float& qScale, int32_t& qOffset)
82{
83 switch (ArmnnType)
84 {
85 case armnn::DataType::QAsymmU8:
86 {
87 qScale = 0.1f;
88 qOffset = 128;
89 break;
90 }
91 case armnn::DataType::QAsymmS8:
Teresa Charlinec5f7d12021-10-22 17:15:00 +010092 {
93 qScale = 0.1f;
94 qOffset = 64;
95 break;
96 }
Matthew Sloyanb63a3112021-09-08 13:05:51 +010097 case armnn::DataType::QSymmS16:
98 {
99 qScale = 0.1f;
100 qOffset = 0;
101 break;
102 }
103 case armnn::DataType::BFloat16:
104 case armnn::DataType::Float16:
105 case armnn::DataType::Float32:
106 default:
107 {
Teresa Charlinec5f7d12021-10-22 17:15:00 +0100108 qScale = 1.f;
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100109 qOffset = 0;
110 break;
111 }
112 }
113}
114
115// Create a vector from 0 to size and quantize (if required).
116template <typename T>
117std::vector<T> CreateQuantizedData(int32_t size, float qScale, int32_t qOffset)
118{
119 std::vector<float> data;
120 for (int32_t i = 0; i < size; ++i)
121 {
122 data.push_back(static_cast<float>(i));
123 }
124
125 return QuantizedVector<T>(data, qScale, qOffset);
126}
127
128// Create a vector from 0 to size divided and then quantized (if required) to create smaller floating point values.
129template <typename T>
130std::vector<T> CreateSmallQuantizedData(int32_t size, float divisor, float qScale, int32_t qOffset)
131{
132 std::vector<float> data;
133 for (int32_t i = 0; i < size; ++i)
134 {
135 float value = static_cast<float>(i);
136 data.push_back(value/divisor);
137 }
138
139 return QuantizedVector<T>(data, qScale, qOffset);;
140}
141
142//
143// Convolution3d implementations
144//
145
// Builds and executes a Convolution3d workload and returns the actual output
// together with the expected output for comparison by the caller.
//
// input/kernel/outputExpected are supplied in NDHWC order ([N,D,H,W,C]) with the
// kernel in [D,H,W,I,O] order. When dataLayout is NCDHW the input/output tensors
// and data are permuted before the workload is created. A non-empty 'bias' is
// applied to the expected output data and passed to the workload as a third input.
// Strides/dilations default to 1 and paddings to 0 when not specified.
template<armnn::DataType ArmnnType,
         armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>,
         typename B = armnn::ResolveType<ArmnnBType>>
LayerTestResult<T, 5> SimpleConvolution3dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const std::vector<T>& input,
    const std::vector<T>& kernel,
    const std::vector<B>& bias,
    const std::vector<T>& outputExpected,
    const armnn::TensorShape& inputShape,
    const armnn::TensorShape& kernelShape,
    const armnn::TensorShape& outputExpectedShape,
    const armnn::DataLayout dataLayout,
    float qScale,
    int32_t qOffset,
    uint32_t strideX = 1,
    uint32_t strideY = 1,
    uint32_t strideZ = 1,
    uint32_t dilationX = 1,
    uint32_t dilationY = 1,
    uint32_t dilationZ = 1,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t padFront = 0,
    uint32_t padBack = 0)
{
    // Input shape is interpreted as NDHWC: [N, D, H, W, C].
    unsigned int inputNum = armnn::numeric_cast<unsigned int>(inputShape[0]);
    unsigned int inputDepth = armnn::numeric_cast<unsigned int>(inputShape[1]);
    unsigned int inputHeight = armnn::numeric_cast<unsigned int>(inputShape[2]);
    unsigned int inputWidth = armnn::numeric_cast<unsigned int>(inputShape[3]);
    unsigned int inputChannels = armnn::numeric_cast<unsigned int>(inputShape[4]);

    // Conv3d weights/kernel layout: [D,H,W,I,O]
    unsigned int kernelDepth = armnn::numeric_cast<unsigned int>(kernelShape[0]);
    unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(kernelShape[1]);
    unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(kernelShape[2]);
    unsigned int kernelInChannels = armnn::numeric_cast<unsigned int>(kernelShape[3]);
    unsigned int kernelOutChannels = armnn::numeric_cast<unsigned int>(kernelShape[4]);

    // Expected output shape is NDHWC as well.
    unsigned int outputNum = armnn::numeric_cast<unsigned int>(outputExpectedShape[0]);
    unsigned int outputDepth = armnn::numeric_cast<unsigned int>(outputExpectedShape[1]);
    unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputExpectedShape[2]);
    unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputExpectedShape[3]);
    unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputExpectedShape[4]);

    bool biasEnabled = bias.size() > 0;

    // If a bias is used, its size must equal the number of output channels.
    ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);

    // Creates the tensors.
    armnn::TensorInfo inputTensorInfo({inputNum, inputDepth, inputHeight, inputWidth, inputChannels}, ArmnnType);
    armnn::TensorInfo outputTensorInfo({outputNum, outputDepth, outputHeight, outputWidth, outputChannels}, ArmnnType);
    armnn::TensorInfo kernelDesc({kernelDepth, kernelHeight, kernelWidth, kernelInChannels, kernelOutChannels},
                                 ArmnnType);
    armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        kernelDesc.SetQuantizationScale(qScale);
        kernelDesc.SetQuantizationOffset(qOffset);
        // Bias scale is input scale * weight scale, with a zero offset.
        biasDesc.SetQuantizationScale(qScale*qScale);
        biasDesc.SetQuantizationOffset(0);
    }

    // Construct the input data.
    std::vector<T> inputData;
    inputData.assign(input.data(), input.data() + inputNum*inputDepth*inputHeight*inputWidth*inputChannels);

    // Construct the output data and apply bias if needed.
    std::vector<T> outputData;
    outputData.assign(outputExpected.data(), outputExpected.data() +
        outputNum*outputDepth*outputHeight*outputWidth*outputChannels);

    if (biasEnabled)
    {
        // Fold the bias into the expected output so it can be compared directly
        // with the workload result.
        ApplyBiasToData(outputData, bias,
                        outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
                        biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset());
    }

    // Permute input and output if data layout is NCDHW.
    if (dataLayout == armnn::DataLayout::NCDHW)
    {
        PermuteTensorNdhwcToNcdhw(inputTensorInfo, inputData);
        PermuteTensorNdhwcToNcdhw(outputTensorInfo, outputData);
    }

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    // Inputs 0 and 1 are the image and the weights; the optional bias becomes input 2 below.
    std::unique_ptr<armnn::ITensorHandle> input0Handle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> input1Handle = tensorHandleFactory.CreateTensorHandle(kernelDesc);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Convolution3dQueueDescriptor data;
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_StrideZ = strideZ;
    data.m_Parameters.m_PadLeft = padLeft;
    data.m_Parameters.m_PadRight = padRight;
    data.m_Parameters.m_PadTop = padTop;
    data.m_Parameters.m_PadBottom = padBottom;
    data.m_Parameters.m_PadFront = padFront;
    data.m_Parameters.m_PadBack = padBack;
    data.m_Parameters.m_DilationX = dilationX;
    data.m_Parameters.m_DilationY = dilationY;
    data.m_Parameters.m_DilationZ = dilationZ;
    data.m_Parameters.m_DataLayout = dataLayout;
    data.m_Parameters.m_BiasEnabled = biasEnabled;

    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, input0Handle.get());
    AddInputToWorkload(data, info, kernelDesc, input1Handle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The bias input is only wired up when enabled.
    std::unique_ptr<armnn::ITensorHandle> input2Handle = nullptr;
    if (biasEnabled)
    {
        input2Handle = tensorHandleFactory.CreateTensorHandle(biasDesc);
        AddInputToWorkload(data, info, biasDesc, input2Handle.get());
    }

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution3d(data, info);
    input0Handle->Allocate();
    input1Handle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(input0Handle.get(), inputData.data());
    CopyDataToITensorHandle(input1Handle.get(), kernel.data());
    if (biasEnabled)
    {
        input2Handle->Allocate();
        CopyDataToITensorHandle(input2Handle.get(), bias.data());
    }

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    // Caller compares actualOutput against outputData (already permuted/biased above).
    return LayerTestResult<T, 5>(actualOutput,
                                 outputData,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}
300
// 3x3x3 kernel over a 1x5x5x5x1 input (values 0..124), unit strides, no padding,
// producing a 1x3x3x3x1 output. Shapes are NDHWC; kernel is [D,H,W,I,O].
template<armnn::DataType ArmnnType,
         armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 5> SimpleConvolution3d3x3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    float qScale;
    int32_t qOffset;
    SetScaleOffset<ArmnnType>(qScale, qOffset);

    armnn::TensorInfo inputDesc({ 1, 5, 5, 5, 1 }, ArmnnType);
    std::vector<T> input = CreateQuantizedData<T>(125, qScale, qOffset);

    armnn::TensorInfo kernelDesc({ 3, 3, 3, 1, 1 }, ArmnnType);
    std::vector<T> kernel = QuantizedVector<T>(
    {
        1, 1, 1,
        1, 1, 1,
        1, 1, 1,

        0, 0, 0,
        0, 1, 0,
        0, 0, 0,

        1, 1, 1,
        1, 1, 1,
        1, 1, 1,
    },
    qScale, qOffset);

    // Pre-computed reference output for the input/kernel above (before bias).
    armnn::TensorInfo outputDesc({ 1, 3, 3, 3, 1 }, ArmnnType);
    std::vector<T> outputData = QuantizedVector<T>(
    {
        589, 608, 627,
        684, 703, 722,
        779, 798, 817,

        1064, 1083, 1102,
        1159, 1178, 1197,
        1254, 1273, 1292,

        1539, 1558, 1577,
        1634, 1653, 1672,
        1729, 1748, 1767
    },
    qScale, qOffset);

    return SimpleConvolution3dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernel,
        GetBiasData<ArmnnBType>(biasEnabled, qScale * qScale, outputDesc, dataLayout),
        outputData,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        dataLayout,
        qScale,
        qOffset
    );
}
368
// 3x5x5 kernel over a 1x3x10x10x1 input (values 0..299) with strides of 2 in
// every dimension and no padding, producing a 1x1x3x3x1 output.
template<armnn::DataType ArmnnType,
         armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 5> Convolution3d2x2x2Strides3x5x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    float qScale;
    int32_t qOffset;
    SetScaleOffset<ArmnnType>(qScale, qOffset);

    armnn::TensorInfo inputDesc({ 1, 3, 10, 10, 1 }, ArmnnType);
    std::vector<T> input = CreateQuantizedData<T>(300, qScale, qOffset);

    // Depth slices weighted 1 / 0 / 2 so each output mixes two input planes.
    armnn::TensorInfo kernelDesc({ 3, 5, 5, 1, 1 }, ArmnnType);
    std::vector<T> kernel = QuantizedVector<T>(
    {
        1, 1, 1, 1, 1,
        1, 1, 1, 1, 1,
        1, 1, 1, 1, 1,
        1, 1, 1, 1, 1,
        1, 1, 1, 1, 1,

        0, 0, 0, 0, 0,
        0, 0, 0, 0, 0,
        0, 0, 0, 0, 0,
        0, 0, 0, 0, 0,
        0, 0, 0, 0, 0,

        2, 2, 2, 2, 2,
        2, 2, 2, 2, 2,
        2, 2, 2, 2, 2,
        2, 2, 2, 2, 2,
        2, 2, 2, 2, 2,
    },
    qScale, qOffset);

    // Pre-computed reference output for the input/kernel above (before bias).
    armnn::TensorInfo outputDesc({ 1, 1, 3, 3, 1 }, ArmnnType);
    std::vector<T> outputData = QuantizedVector<T>(
    {
        11650, 11800, 11950,

        13150, 13300, 13450,

        14650, 14800, 14950
    },
    qScale, qOffset);

    return SimpleConvolution3dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernel,
        GetBiasData<ArmnnBType>(biasEnabled, qScale * qScale, outputDesc, dataLayout),
        outputData,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        dataLayout,
        qScale,
        qOffset,
        2, // strideX
        2, // strideY
        2  // strideZ
    );
}
439
// 2x2x2 two-channel kernel over a 1x5x5x5x2 input (values 0..249) with a
// dilation of 3 in every dimension, unit strides and no padding.
template<armnn::DataType ArmnnType,
         armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 5> Convolution3d2x2x2Dilation2x2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    float qScale;
    int32_t qOffset;
    SetScaleOffset<ArmnnType>(qScale, qOffset);

    armnn::TensorInfo inputDesc({ 1, 5, 5, 5, 2 }, ArmnnType);
    std::vector<T> input = CreateQuantizedData<T>(250, qScale, qOffset);

    armnn::TensorInfo kernelDesc({ 2, 2, 2, 2, 2 }, ArmnnType);
    std::vector<T> kernel = QuantizedVector<T>(
    {
        -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1,
         1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1,
    },
    qScale, qOffset);

    // Since the dilation rate is 3, the effective kernel size is
    // d*(K-1)+1 = 3*(2-1)+1 = 4 in each spatial dimension (4x4x4),
    // therefore the output will be 2x2x2.
    armnn::TensorInfo outputDesc({ 1, 2, 2, 2, 2 }, ArmnnType);
    std::vector<T> outputData = QuantizedVector<T>(
    {
        -1124, 974,
        -1148, 978,

        -1244, 994,
        -1268, 998,

        -1724, 1074,
        -1748, 1078,

        -1844, 1094,
        -1868, 1098
    },
    qScale, qOffset);

    return SimpleConvolution3dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernel,
        GetBiasData<ArmnnBType>(biasEnabled, qScale * qScale, outputDesc, dataLayout),
        outputData,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        dataLayout,
        qScale,
        qOffset,
        1, // strideX
        1, // strideY
        1, // strideZ
        3, // dilationX
        3, // dilationY
        3  // dilationZ
    );
}
506
// 3x3x3 kernel over a 1x5x5x5x1 input (values 0..124) with padding of 1 on every
// side and unit strides, so the output keeps the 5x5x5 spatial size ("same" padding).
template<armnn::DataType ArmnnType,
         armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 5> Convolution3dPaddingSame3x3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    float qScale;
    int32_t qOffset;
    SetScaleOffset<ArmnnType>(qScale, qOffset);

    armnn::TensorInfo inputDesc({ 1, 5, 5, 5, 1 }, ArmnnType);
    std::vector<T> input = CreateQuantizedData<T>(125, qScale, qOffset);

    // Middle depth slice is all zero, so each output sums the planes above and below.
    armnn::TensorInfo kernelDesc({ 3, 3, 3, 1, 1 }, ArmnnType);
    std::vector<T> kernel = QuantizedVector<T>(
    {
        1, 1, 1,
        1, 1, 1,
        1, 1, 1,

        0, 0, 0,
        0, 0, 0,
        0, 0, 0,

        1, 1, 1,
        1, 1, 1,
        1, 1, 1,
    },
    qScale, qOffset);

    // Pre-computed reference output for the input/kernel above (before bias).
    armnn::TensorInfo outputDesc({ 1, 5, 5, 5, 1 }, ArmnnType);
    std::vector<T> outputData = QuantizedVector<T>(
    {
        112, 171, 177, 183, 124,
        183, 279, 288, 297, 201,
        213, 324, 333, 342, 231,
        243, 369, 378, 387, 261,
        172, 261, 267, 273, 184,

        224, 342, 354, 366, 248,
        366, 558, 576, 594, 402,
        426, 648, 666, 684, 462,
        486, 738, 756, 774, 522,
        344, 522, 534, 546, 368,

        424, 642, 654, 666, 448,
        666, 1008, 1026, 1044, 702,
        726, 1098, 1116, 1134, 762,
        786, 1188, 1206, 1224, 822,
        544, 822, 834, 846, 568,
        624, 942, 954, 966, 648,

        966, 1458, 1476, 1494, 1002,
        1026, 1548, 1566, 1584, 1062,
        1086, 1638, 1656, 1674, 1122,
        744, 1122, 1134, 1146, 768,
        312, 471, 477, 483, 324,
        483, 729, 738, 747, 501,
        513, 774, 783, 792, 531,
        543, 819, 828, 837, 561,
        372, 561, 567, 573, 384
    },
    qScale, qOffset);

    return SimpleConvolution3dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernel,
        GetBiasData<ArmnnBType>(biasEnabled, qScale * qScale, outputDesc, dataLayout),
        outputData,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        dataLayout,
        qScale,
        qOffset,
        1, // strideX
        1, // strideY
        1, // strideZ
        1, // dilationX
        1, // dilationY
        1, // dilationZ
        1, // padLeft
        1, // padTop
        1, // padRight
        1, // padBottom
        1, // padFront
        1  // padBack
    );
}
603
// Float32-only test combining stride 3, dilation 2 and padding 1 in every
// dimension, with a two-channel 3x3x3 kernel over a 1x3x10x10x2 input.
// Float32 path: qScale/qOffset are unused identity values.
LayerTestResult<float, 5> Convolution3dStrideDilationPadding3x3x3TestCommonFloat32(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    float qScale = 0.f;
    int32_t qOffset = 0;

    // Inputs scaled down by 100 to keep values in a small floating point range.
    armnn::TensorInfo inputDesc({ 1, 3, 10, 10, 2 }, armnn::DataType::Float32);
    std::vector<float> input = CreateSmallQuantizedData<float>(600, 100.0f, qScale, qOffset);

    armnn::TensorInfo kernelDesc({ 3, 3, 3, 2, 2 }, armnn::DataType::Float32);
    std::vector<float> kernel = CreateSmallQuantizedData<float>(108, 100.0f, qScale, qOffset);

    // Since the dilation rate is 2 this will dilate the kernel to be 5x5: d(K-1)+1 --> 2 x (3-1) + 1 = 5,
    // therefore the output will be 1x4x4: (I − K + 2P)/S +1 => trunc((10 - 3 + 2x2 )/3 + 1))
    // where, dilation size = d = 2; kernel size = K = 3; input size = I = 10; padding size = P = 2; stride = S = 3
    armnn::TensorInfo outputDesc({ 1, 1, 4, 4, 2 }, armnn::DataType::Float32);
    std::vector<float> outputData =
    {
        12.0312f, 12.2268f, 17.7512f, 18.0494f,
        18.176f, 18.4814f, 5.6912f, 5.7938f,
        19.1664f, 19.5078f, 28.119f, 28.6383f,
        28.6914f, 29.2215f, 8.9094f, 9.0873f,

        23.1264f, 23.5398f, 33.843f, 34.4703f,
        34.4154f, 35.0535f, 10.6734f, 10.8873f,
        6.2712f, 6.417f, 9.0718f, 9.2929f,
        9.2194f, 9.4441f, 2.7862f, 2.8615f
    };

    return SimpleConvolution3dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernel,
        GetBiasData<armnn::DataType::Float32>(biasEnabled, qScale * qScale, outputDesc, dataLayout),
        outputData,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        dataLayout,
        qScale,
        qOffset,
        3, // strideX
        3, // strideY
        3, // strideZ
        2, // dilationX
        2, // dilationY
        2, // dilationZ
        1, // padLeft
        1, // padTop
        1, // padRight
        1, // padBottom
        1, // padFront
        1  // padBack
    );
}
665
// Float32-only test: 3x3x3 kernel of small mixed-sign weights over a
// 1x3x10x10x1 input with strides of 2 in every dimension, no padding.
LayerTestResult<float, 5> Convolution3d2x2x2Stride3x3x3SmallTestCommonFloat32(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Float32 path: qScale/qOffset are unused identity values.
    float qScale = 0.f;
    int32_t qOffset = 0;

    // Inputs scaled down by 100 to keep values in a small floating point range.
    armnn::TensorInfo inputDesc({ 1, 3, 10, 10, 1 }, armnn::DataType::Float32);
    std::vector<float> input = CreateSmallQuantizedData<float>(300, 100.0f, qScale, qOffset);

    armnn::TensorInfo kernelDesc({ 3, 3, 3, 1, 1 }, armnn::DataType::Float32);
    std::vector<float> kernel =
    {
        0.125977f, 0.150391f, 0.101562f,
        0.0585938f, 0.0864258f, 0.043457f,
        0.034668f, 0.0322266f, 0.0385742f,

        0.125977f, 0.150391f, -0.101562f,
        -0.0585938f,-0.0864258f,-0.043457f,
        -0.0104630f, 0.0154114f, 0.0013768f,

        0.0344238f, 0.035644f, 0.0495605f,
        0.0683594f, 0.099121f, -0.0461426f,
        -0.0996094f,-0.126953f, -0.043457f,
    };

    // Pre-computed reference output for the input/kernel above (before bias).
    armnn::TensorInfo outputDesc({ 1, 1, 4, 4, 1 }, armnn::DataType::Float32);
    std::vector<float> outputData =
    {
        -0.08156067f, -0.06891209f, -0.05589598f, -0.04310101f,
        0.04584253f, 0.05855697f, 0.07129729f, 0.08325434f,
        0.17304349f, 0.18521416f, 0.19818866f, 0.21096253f,
        0.29965734f, 0.312698f, 0.32547557f, 0.33818722f
    };

    return SimpleConvolution3dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernel,
        GetBiasData<armnn::DataType::Float32>(biasEnabled, qScale * qScale, outputDesc, dataLayout),
        outputData,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        dataLayout,
        qScale,
        qOffset,
        2, // strideX
        2, // strideY
        2  // strideZ
    );
}
723
// Float16-only test: 2x2x2 two-channel kernel over a 1x2x3x3x2 input of the
// values 1..36, unit strides and no padding, producing a 1x1x2x2x2 output.
LayerTestResult<armnn::Half, 5> Convolution3d2x3x3TestCommonFloat16(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // The _h suffix creates half_float literals.
    using namespace half_float::literal;

    // Float16 path: qScale/qOffset are unused identity values.
    float qScale = 0.f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputDesc({ 1, 2, 3, 3, 2 }, armnn::DataType::Float16);
    const std::vector<armnn::Half> input =
    {
        1._h, 2._h, 3._h,
        4._h, 5._h, 6._h,

        7._h, 8._h, 9._h,
        10._h, 11._h, 12._h,

        13._h, 14._h, 15._h,
        16._h, 17._h, 18._h,

        19._h, 20._h, 21._h,
        22._h, 23._h, 24._h,

        25._h, 26._h, 27._h,
        28._h, 29._h, 30._h,

        31._h, 32._h, 33._h,
        34._h, 35._h, 36._h
    };

    armnn::TensorInfo kernelDesc({ 2, 2, 2, 2, 2 }, armnn::DataType::Float16);
    std::vector<armnn::Half> kernel =
    {
        -1._h, -1._h, -1._h, -1._h, -1._h, -1._h, -1._h, -1._h,
        -1._h, -1._h, -1._h,  1._h,  1._h,  1._h, -1._h, -1._h,
         1._h,  1._h, -1._h,  1._h, -1._h,  1._h, -1._h,  1._h,
        -1._h, -1._h, -1._h,  1._h, -1._h,  1._h, -1._h,  1._h,
    };

    // Pre-computed reference output for the input/kernel above (before bias).
    armnn::TensorInfo outputDesc({ 1, 1, 2, 2, 2 }, armnn::DataType::Float16);
    std::vector<armnn::Half> outputData =
    {
        -176._h, 128._h,
        -200._h, 132._h,

        -248._h, 140._h,
        -272._h, 144._h
    };

    return SimpleConvolution3dTestImpl<armnn::DataType::Float16, armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernel,
        GetBiasData<armnn::DataType::Float16>(biasEnabled, qScale * qScale, outputDesc, dataLayout),
        outputData,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        dataLayout,
        qScale,
        qOffset
    );
}
793
// Float16-only test with small-magnitude values: 2x2x2 kernel over a
// 1x2x4x4x1 input, unit strides and no padding, producing a 1x1x3x3x1 output.
LayerTestResult<armnn::Half, 5> Convolution3d2x2x2SmallTestCommonFloat16(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // The _h suffix creates half_float literals.
    using namespace half_float::literal;

    // Float16 path: qScale/qOffset are unused identity values.
    float qScale = 0.f;
    int32_t qOffset = 0;

    armnn::TensorInfo inputDesc({ 1, 2, 4, 4, 1 }, armnn::DataType::Float16);
    const std::vector<armnn::Half> input =
    {
        0.0367984_h, 0.0380895_h, 0.0420157_h, 0.0675631_h,
        0.0938920_h, 0.0476106_h, 0.1035490_h, 0.1260370_h,
        0.0461647_h, 0.0883828_h, 0.1159540_h, 0.0498519_h,
        0.0104630_h, 0.0154114_h, 0.00137681_h, 0.0344238_h,

        0.0356445_h, 0.0495605_h, 0.0683594_h, 0.0991211_h,
        0.0461426_h, 0.0996094_h, 0.1269530_h, 0.0393066_h,
        0.103516_h, 0.032544_h, 0.124334_h, 0.0564566_h,
        0.0123544_h, 0.0461647_h, 0.0883828_h, 0.1159540_h,
    };

    armnn::TensorInfo kernelDesc({ 2, 2, 2, 1, 1 }, armnn::DataType::Float16);
    std::vector<armnn::Half> kernel =
    {
        -0.126184_h, -0.150468_h,
        -0.101412_h, -0.0586369_h,

        -0.0435089_h, 0.0347555_h,
        0.0323111_h, 0.0385381_h
    };

    // Pre-computed reference output for the input/kernel above (before bias).
    armnn::TensorInfo outputDesc({ 1, 1, 3, 3, 1 }, armnn::DataType::Float16);
    std::vector<armnn::Half> outputData =
    {
        -0.01718917_h, -0.01370182_h, -0.02727737_h,

        -0.02282543_h, -0.03144084_h, -0.04468598_h,

        -0.02228982_h, -0.02244923_h, -0.02042268_h
    };

    return SimpleConvolution3dTestImpl<armnn::DataType::Float16, armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernel,
        GetBiasData<armnn::DataType::Float16>(biasEnabled, qScale * qScale, outputDesc, dataLayout),
        outputData,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        dataLayout,
        qScale,
        qOffset
    );
}
856
857LayerTestResult<float, 5> SimpleConvolution3d3x3x3Float32Test(
858 armnn::IWorkloadFactory& workloadFactory,
859 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
860 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100861 bool biasEnabled,
862 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100863{
864 return SimpleConvolution3d3x3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100865 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100866}
867
868LayerTestResult<int8_t, 5> SimpleConvolution3d3x3x3Int8Test(
869 armnn::IWorkloadFactory& workloadFactory,
870 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
871 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100872 bool biasEnabled,
873 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100874{
875 return SimpleConvolution3d3x3x3TestCommon<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100876 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100877}
878
879LayerTestResult<uint8_t, 5> SimpleConvolution3d3x3x3Uint8Test(
880 armnn::IWorkloadFactory& workloadFactory,
881 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
882 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100883 bool biasEnabled,
884 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100885{
886 return SimpleConvolution3d3x3x3TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100887 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100888}
889
890LayerTestResult<int16_t, 5> SimpleConvolution3d3x3x3Int16Test(
891 armnn::IWorkloadFactory& workloadFactory,
892 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
893 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100894 bool biasEnabled,
895 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100896{
897 return SimpleConvolution3d3x3x3TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100898 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100899}
900
901
902LayerTestResult<float, 5> Convolution3d2x2x2Strides3x5x5Float32Test(
903 armnn::IWorkloadFactory& workloadFactory,
904 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
905 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100906 bool biasEnabled,
907 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100908{
909 return Convolution3d2x2x2Strides3x5x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100910 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100911}
912
913LayerTestResult<int8_t, 5> Convolution3d2x2x2Strides3x5x5Int8Test(
914 armnn::IWorkloadFactory& workloadFactory,
915 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
916 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100917 bool biasEnabled,
918 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100919{
920 return Convolution3d2x2x2Strides3x5x5TestCommon<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100921 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100922}
923
924LayerTestResult<uint8_t, 5> Convolution3d2x2x2Strides3x5x5Uint8Test(
925 armnn::IWorkloadFactory& workloadFactory,
926 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
927 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100928 bool biasEnabled,
929 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100930{
931 return Convolution3d2x2x2Strides3x5x5TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100932 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100933}
934
935LayerTestResult<int16_t, 5> Convolution3d2x2x2Strides3x5x5Int16Test(
936 armnn::IWorkloadFactory& workloadFactory,
937 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
938 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100939 bool biasEnabled,
940 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100941{
942 return Convolution3d2x2x2Strides3x5x5TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100943 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100944}
945
946LayerTestResult<float, 5> Convolution3d2x2x2Dilation2x2x2Float32Test(
947 armnn::IWorkloadFactory& workloadFactory,
948 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
949 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100950 bool biasEnabled,
951 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100952{
953 return Convolution3d2x2x2Dilation2x2x2TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100954 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100955}
956
957LayerTestResult<int8_t, 5> Convolution3d2x2x2Dilation2x2x2Int8Test(
958 armnn::IWorkloadFactory& workloadFactory,
959 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
960 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100961 bool biasEnabled,
962 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100963{
964 return Convolution3d2x2x2Dilation2x2x2TestCommon<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100965 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100966}
967
968LayerTestResult<uint8_t, 5> Convolution3d2x2x2Dilation2x2x2Uint8Test(
969 armnn::IWorkloadFactory& workloadFactory,
970 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
971 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100972 bool biasEnabled,
973 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100974{
975 return Convolution3d2x2x2Dilation2x2x2TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100976 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100977}
978
979LayerTestResult<int16_t, 5> Convolution3d2x2x2Dilation2x2x2Int16Test(
980 armnn::IWorkloadFactory& workloadFactory,
981 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
982 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100983 bool biasEnabled,
984 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100985{
986 return Convolution3d2x2x2Dilation2x2x2TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100987 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100988}
989
990LayerTestResult<float, 5> Convolution3dPaddingSame3x3x3Float32Test(
991 armnn::IWorkloadFactory& workloadFactory,
992 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
993 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100994 bool biasEnabled,
995 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100996{
997 return Convolution3dPaddingSame3x3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100998 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100999}
1000
1001LayerTestResult<int8_t, 5> Convolution3dPaddingSame3x3x3Int8Test(
1002 armnn::IWorkloadFactory& workloadFactory,
1003 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1004 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001005 bool biasEnabled,
1006 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001007{
1008 return Convolution3dPaddingSame3x3x3TestCommon<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001009 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001010}
1011
1012LayerTestResult<uint8_t, 5> Convolution3dPaddingSame3x3x3Uint8Test(
1013 armnn::IWorkloadFactory& workloadFactory,
1014 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1015 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001016 bool biasEnabled,
1017 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001018{
1019 return Convolution3dPaddingSame3x3x3TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001020 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001021}
1022
1023LayerTestResult<int16_t, 5> Convolution3dPaddingSame3x3x3Int16Test(
1024 armnn::IWorkloadFactory& workloadFactory,
1025 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1026 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001027 bool biasEnabled,
1028 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001029{
1030 return Convolution3dPaddingSame3x3x3TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001031 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001032}
1033
1034LayerTestResult<float, 5> Convolution3dStrideDilationPadding3x3x3Float32Test(
1035 armnn::IWorkloadFactory& workloadFactory,
1036 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1037 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001038 bool biasEnabled,
1039 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001040{
1041 return Convolution3dStrideDilationPadding3x3x3TestCommonFloat32(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001042 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001043}
1044
1045LayerTestResult<float, 5> Convolution3d2x2x2Stride3x3x3SmallFloat32Test(
1046 armnn::IWorkloadFactory& workloadFactory,
1047 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1048 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001049 bool biasEnabled,
1050 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001051{
1052 return Convolution3d2x2x2Stride3x3x3SmallTestCommonFloat32(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001053 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001054}
1055
1056LayerTestResult<armnn::Half, 5> Convolution3d2x3x3Float16Test(
1057 armnn::IWorkloadFactory& workloadFactory,
1058 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1059 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001060 bool biasEnabled,
1061 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001062{
1063 return Convolution3d2x3x3TestCommonFloat16(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001064 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001065}
1066
1067LayerTestResult<armnn::Half, 5> Convolution3d2x2x2SmallFloat16Test(
1068 armnn::IWorkloadFactory& workloadFactory,
1069 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1070 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001071 bool biasEnabled,
1072 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001073{
1074 return Convolution3d2x2x2SmallTestCommonFloat16(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001075 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001076}