blob: d62ffedf3f3404646efca9ef0711e94d15a3e54f [file] [log] [blame]
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001//
2// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "Conv3dTestImpl.hpp"
7
Colm Donelanc42a9872022-02-02 16:35:09 +00008#include <armnnUtils/QuantizeHelper.hpp>
Matthew Sloyanb63a3112021-09-08 13:05:51 +01009
10#include <armnnUtils/DataLayoutIndexed.hpp>
11
Colm Donelan0c479742021-12-10 12:43:54 +000012#include <armnn/backends/TensorHandle.hpp>
Matthew Sloyanb63a3112021-09-08 13:05:51 +010013
Sadik Armagana097d2a2021-11-24 15:47:28 +000014#include <armnnTestUtils/DataLayoutUtils.hpp>
15#include <armnnTestUtils/TensorCopyUtils.hpp>
Colm Donelan0c479742021-12-10 12:43:54 +000016#include <armnnTestUtils/WorkloadTestUtils.hpp>
Matthew Sloyanb63a3112021-09-08 13:05:51 +010017
Colm Donelanc42a9872022-02-02 16:35:09 +000018#include <armnnTestUtils/TensorHelpers.hpp>
Matthew Sloyanb63a3112021-09-08 13:05:51 +010019
20using namespace armnnUtils;
21
22//
23// Helper templates
24//
25
26// Helper template that returns a quantized bias depending on the number of output channels.
27template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
28std::vector<T> GetBiasData(bool biasEnabled, float qScale, armnn::TensorInfo outputInfo, armnn::DataLayout layout)
29{
30 if(!biasEnabled)
31 {
32 return std::vector<T>();
33 }
34 else
35 {
36 const armnnUtils::DataLayoutIndexed dataLayoutIndexed(layout);
37 const unsigned int outputChannels = outputInfo.GetShape()[dataLayoutIndexed.GetChannelsIndex()];
38
39 switch (outputChannels)
40 {
41 case 1:
42 {
43 return QuantizedVector<T>({2}, qScale, 0);
44 }
45 case 2:
46 default:
47 {
48 return QuantizedVector<T>({0, 2}, qScale, 0);
49 }
50 }
51 }
52}
53
54// Modifies a std::vector in-place using a specified bias.
55template<typename T, typename B>
56void ApplyBiasToData(std::vector<T>& v, const std::vector<B>& bias,
57 float vScale, int32_t vOffset,
58 float bScale, int32_t bOffset)
59{
60 ARMNN_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
61 "Invalid type and parameter combination.");
62 ARMNN_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
63 "Invalid type and parameter combination.");
64
65 for (uint32_t i = 0; i < bias.size(); ++i)
66 {
Rob Hugheseb70c912021-10-07 08:53:58 +010067 for (size_t j = i; j < v.size(); j+=bias.size())
Matthew Sloyanb63a3112021-09-08 13:05:51 +010068 {
69 // Note we need to dequantize and re-quantize the image value and the bias.
70 float dBias = SelectiveDequantize(bias[i], bScale, bOffset);
71
72 T& outRef = v[j];
73 float dOutput = SelectiveDequantize(outRef, vScale, vOffset);
74 outRef = SelectiveQuantize<T>(dOutput + dBias, vScale, vOffset);
75 }
76 }
77}
78
79// Set the quantization scale and offset values for data types.
80template<armnn::DataType ArmnnType>
81void SetScaleOffset(float& qScale, int32_t& qOffset)
82{
83 switch (ArmnnType)
84 {
85 case armnn::DataType::QAsymmU8:
86 {
87 qScale = 0.1f;
88 qOffset = 128;
89 break;
90 }
91 case armnn::DataType::QAsymmS8:
Teresa Charlinec5f7d12021-10-22 17:15:00 +010092 {
93 qScale = 0.1f;
94 qOffset = 64;
95 break;
96 }
Matthew Sloyanb63a3112021-09-08 13:05:51 +010097 case armnn::DataType::QSymmS16:
98 {
99 qScale = 0.1f;
100 qOffset = 0;
101 break;
102 }
103 case armnn::DataType::BFloat16:
104 case armnn::DataType::Float16:
105 case armnn::DataType::Float32:
106 default:
107 {
Teresa Charlinec5f7d12021-10-22 17:15:00 +0100108 qScale = 1.f;
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100109 qOffset = 0;
110 break;
111 }
112 }
113}
114
115// Create a vector from 0 to size and quantize (if required).
116template <typename T>
117std::vector<T> CreateQuantizedData(int32_t size, float qScale, int32_t qOffset)
118{
119 std::vector<float> data;
120 for (int32_t i = 0; i < size; ++i)
121 {
122 data.push_back(static_cast<float>(i));
123 }
124
125 return QuantizedVector<T>(data, qScale, qOffset);
126}
127
128// Create a vector from 0 to size divided and then quantized (if required) to create smaller floating point values.
129template <typename T>
130std::vector<T> CreateSmallQuantizedData(int32_t size, float divisor, float qScale, int32_t qOffset)
131{
132 std::vector<float> data;
133 for (int32_t i = 0; i < size; ++i)
134 {
135 float value = static_cast<float>(i);
136 data.push_back(value/divisor);
137 }
138
139 return QuantizedVector<T>(data, qScale, qOffset);;
140}
141
142//
143// Convolution3d implementations
144//
145
// Generic driver shared by every Convolution3d layer test in this file.
//
// Builds a Convolution3d workload from the supplied input/kernel/bias data,
// executes it through the given workload factory/backend, and returns a
// LayerTestResult pairing the backend output with the expected reference.
// The bias (if any) is folded into the expected output here, so callers pass
// outputExpected WITHOUT bias applied.
//
// Shape conventions: input/output are NDHWC ([N,D,H,W,C]); the kernel is
// [D,H,W,I,O]. When dataLayout is NCDHW, input and expected output are
// permuted before the workload is created. Strides/dilations default to 1
// and all paddings to 0.
template<armnn::DataType ArmnnType,
         armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>,
         typename B = armnn::ResolveType<ArmnnBType>>
LayerTestResult<T, 5> SimpleConvolution3dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const std::vector<T>& input,
    const std::vector<T>& kernel,
    const std::vector<B>& bias,
    const std::vector<T>& outputExpected,
    const armnn::TensorShape& inputShape,
    const armnn::TensorShape& kernelShape,
    const armnn::TensorShape& outputExpectedShape,
    const armnn::DataLayout dataLayout,
    float qScale,
    int32_t qOffset,
    uint32_t strideX = 1,
    uint32_t strideY = 1,
    uint32_t strideZ = 1,
    uint32_t dilationX = 1,
    uint32_t dilationY = 1,
    uint32_t dilationZ = 1,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t padFront = 0,
    uint32_t padBack = 0)
{
    // Input and expected-output shapes arrive in NDHWC order.
    unsigned int inputNum = armnn::numeric_cast<unsigned int>(inputShape[0]);
    unsigned int inputDepth = armnn::numeric_cast<unsigned int>(inputShape[1]);
    unsigned int inputHeight = armnn::numeric_cast<unsigned int>(inputShape[2]);
    unsigned int inputWidth = armnn::numeric_cast<unsigned int>(inputShape[3]);
    unsigned int inputChannels = armnn::numeric_cast<unsigned int>(inputShape[4]);

    // Conv3d weights/kernel layout: [D,H,W,I,O]
    unsigned int kernelDepth = armnn::numeric_cast<unsigned int>(kernelShape[0]);
    unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(kernelShape[1]);
    unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(kernelShape[2]);
    unsigned int kernelInChannels = armnn::numeric_cast<unsigned int>(kernelShape[3]);
    unsigned int kernelOutChannels = armnn::numeric_cast<unsigned int>(kernelShape[4]);

    unsigned int outputNum = armnn::numeric_cast<unsigned int>(outputExpectedShape[0]);
    unsigned int outputDepth = armnn::numeric_cast<unsigned int>(outputExpectedShape[1]);
    unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputExpectedShape[2]);
    unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputExpectedShape[3]);
    unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputExpectedShape[4]);

    // An empty bias vector means "no bias input".
    bool biasEnabled = bias.size() > 0;

    // If a bias is used, its size must equal the number of output channels.
    ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);

    // Creates the tensors.
    armnn::TensorInfo inputTensorInfo({inputNum, inputDepth, inputHeight, inputWidth, inputChannels}, ArmnnType);
    armnn::TensorInfo outputTensorInfo({outputNum, outputDepth, outputHeight, outputWidth, outputChannels}, ArmnnType);
    armnn::TensorInfo kernelDesc({kernelDepth, kernelHeight, kernelWidth, kernelInChannels, kernelOutChannels},
                                 ArmnnType);
    armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        kernelDesc.SetQuantizationScale(qScale);
        kernelDesc.SetQuantizationOffset(qOffset);
        // Bias scale is input scale * weight scale (both qScale here); its offset is 0.
        biasDesc.SetQuantizationScale(qScale*qScale);
        biasDesc.SetQuantizationOffset(0);
    }

    // Construct the input data.
    std::vector<T> inputData;
    inputData.assign(input.data(), input.data() + inputNum*inputDepth*inputHeight*inputWidth*inputChannels);

    // Construct the output data and apply bias if needed.
    std::vector<T> outputData;
    outputData.assign(outputExpected.data(), outputExpected.data() +
        outputNum*outputDepth*outputHeight*outputWidth*outputChannels);

    if (biasEnabled)
    {
        // Fold the bias into the reference output (callers supply it pre-bias).
        ApplyBiasToData(outputData, bias,
                        outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
                        biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset());
    }

    // Permute input and output if data layout is NCDHW.
    if (dataLayout == armnn::DataLayout::NCDHW)
    {
        PermuteTensorNdhwcToNcdhw(inputTensorInfo, inputData);
        PermuteTensorNdhwcToNcdhw(outputTensorInfo, outputData);
    }

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    // Input 0 is the image, input 1 the weights; the optional bias is input 2 (added below).
    std::unique_ptr<armnn::ITensorHandle> input0Handle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> input1Handle = tensorHandleFactory.CreateTensorHandle(kernelDesc);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Convolution3dQueueDescriptor data;
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_StrideZ = strideZ;
    data.m_Parameters.m_PadLeft = padLeft;
    data.m_Parameters.m_PadRight = padRight;
    data.m_Parameters.m_PadTop = padTop;
    data.m_Parameters.m_PadBottom = padBottom;
    data.m_Parameters.m_PadFront = padFront;
    data.m_Parameters.m_PadBack = padBack;
    data.m_Parameters.m_DilationX = dilationX;
    data.m_Parameters.m_DilationY = dilationY;
    data.m_Parameters.m_DilationZ = dilationZ;
    data.m_Parameters.m_DataLayout = dataLayout;
    data.m_Parameters.m_BiasEnabled = biasEnabled;

    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, input0Handle.get());
    AddInputToWorkload(data, info, kernelDesc, input1Handle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The bias input must be registered before the workload is created.
    std::unique_ptr<armnn::ITensorHandle> input2Handle = nullptr;
    if (biasEnabled)
    {
        input2Handle = tensorHandleFactory.CreateTensorHandle(biasDesc);
        AddInputToWorkload(data, info, biasDesc, input2Handle.get());
    }

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Convolution3d,
                                                                                data,
                                                                                info);
    input0Handle->Allocate();
    input1Handle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(input0Handle.get(), inputData.data());
    CopyDataToITensorHandle(input1Handle.get(), kernel.data());
    if (biasEnabled)
    {
        input2Handle->Allocate();
        CopyDataToITensorHandle(input2Handle.get(), bias.data());
    }

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    // actualOutput vs. outputData (the bias-adjusted, possibly permuted reference).
    return LayerTestResult<T, 5>(actualOutput,
                                 outputData,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}
302
// Simple 3x3x3 convolution: a 5x5x5 single-channel input convolved with one
// 3x3x3 filter (all-ones top and bottom planes, a single centre one in the
// middle plane), unit strides, no padding -> 3x3x3 output.
template<armnn::DataType ArmnnType,
         armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 5> SimpleConvolution3d3x3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Type-appropriate quantization parameters (identity for float types).
    float qScale;
    int32_t qOffset;
    SetScaleOffset<ArmnnType>(qScale, qOffset);

    // NDHWC input: single batch/channel 5x5x5 volume holding the ramp 0..124.
    armnn::TensorInfo inputDesc({ 1, 5, 5, 5, 1 }, ArmnnType);
    std::vector<T> input = CreateQuantizedData<T>(125, qScale, qOffset);

    // Kernel layout is [D,H,W,I,O]; three 3x3 planes follow.
    armnn::TensorInfo kernelDesc({ 3, 3, 3, 1, 1 }, ArmnnType);
    std::vector<T> kernel = QuantizedVector<T>(
        {
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 1, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
        },
        qScale, qOffset);

    // Expected output WITHOUT bias; SimpleConvolution3dTestImpl folds the bias in.
    armnn::TensorInfo outputDesc({ 1, 3, 3, 3, 1 }, ArmnnType);
    std::vector<T> outputData = QuantizedVector<T>(
        {
            589, 608, 627,
            684, 703, 722,
            779, 798, 817,

            1064, 1083, 1102,
            1159, 1178, 1197,
            1254, 1273, 1292,

            1539, 1558, 1577,
            1634, 1653, 1672,
            1729, 1748, 1767
        },
        qScale, qOffset);

    return SimpleConvolution3dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernel,
        GetBiasData<ArmnnBType>(biasEnabled, qScale * qScale, outputDesc, dataLayout),
        outputData,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        dataLayout,
        qScale,
        qOffset
    );
}
370
// Strided 3D convolution: a 3x10x10 single-channel input convolved with one
// 3x5x5 filter (planes of all 1s, all 0s, all 2s) using strides of 2 in every
// spatial dimension -> 1x3x3 output.
template<armnn::DataType ArmnnType,
         armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 5> Convolution3d2x2x2Strides3x5x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Type-appropriate quantization parameters (identity for float types).
    float qScale;
    int32_t qOffset;
    SetScaleOffset<ArmnnType>(qScale, qOffset);

    // NDHWC input: single batch/channel 3x10x10 volume holding the ramp 0..299.
    armnn::TensorInfo inputDesc({ 1, 3, 10, 10, 1 }, ArmnnType);
    std::vector<T> input = CreateQuantizedData<T>(300, qScale, qOffset);

    // Kernel layout is [D,H,W,I,O]; three 5x5 planes follow.
    armnn::TensorInfo kernelDesc({ 3, 5, 5, 1, 1 }, ArmnnType);
    std::vector<T> kernel = QuantizedVector<T>(
        {
            1, 1, 1, 1, 1,
            1, 1, 1, 1, 1,
            1, 1, 1, 1, 1,
            1, 1, 1, 1, 1,
            1, 1, 1, 1, 1,

            0, 0, 0, 0, 0,
            0, 0, 0, 0, 0,
            0, 0, 0, 0, 0,
            0, 0, 0, 0, 0,
            0, 0, 0, 0, 0,

            2, 2, 2, 2, 2,
            2, 2, 2, 2, 2,
            2, 2, 2, 2, 2,
            2, 2, 2, 2, 2,
            2, 2, 2, 2, 2,
        },
        qScale, qOffset);

    // Expected output WITHOUT bias; SimpleConvolution3dTestImpl folds the bias in.
    armnn::TensorInfo outputDesc({ 1, 1, 3, 3, 1 }, ArmnnType);
    std::vector<T> outputData = QuantizedVector<T>(
        {
            11650, 11800, 11950,

            13150, 13300, 13450,

            14650, 14800, 14950
        },
        qScale, qOffset);

    return SimpleConvolution3dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernel,
        GetBiasData<ArmnnBType>(biasEnabled, qScale * qScale, outputDesc, dataLayout),
        outputData,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        dataLayout,
        qScale,
        qOffset,
        2, // strideX
        2, // strideY
        2  // strideZ
    );
}
441
// Dilated 3D convolution: a 5x5x5 two-channel input convolved with a 2x2x2
// kernel (2 in / 2 out channels) dilated by 3 in every spatial dimension.
// NOTE(review): the function name says "Dilation2x2x2" but the dilation
// arguments passed below are 3 — confirm the intended name against callers.
template<armnn::DataType ArmnnType,
         armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 5> Convolution3d2x2x2Dilation2x2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Type-appropriate quantization parameters (identity for float types).
    float qScale;
    int32_t qOffset;
    SetScaleOffset<ArmnnType>(qScale, qOffset);

    // NDHWC input: 5x5x5 volume with two channels, holding the ramp 0..249.
    armnn::TensorInfo inputDesc({ 1, 5, 5, 5, 2 }, ArmnnType);
    std::vector<T> input = CreateQuantizedData<T>(250, qScale, qOffset);

    // Kernel layout is [D,H,W,I,O]: 2x2x2 spatial, 2 input and 2 output channels.
    armnn::TensorInfo kernelDesc({ 2, 2, 2, 2, 2 }, ArmnnType);
    std::vector<T> kernel = QuantizedVector<T>(
        {
            -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,  1,  1,  1, -1, -1,
             1,  1, -1,  1, -1,  1, -1,  1, -1, -1, -1,  1, -1,  1, -1,  1,
        },
        qScale, qOffset);

    // Since the dilation rate is 3, the 2x2x2 kernel's effective size is 4x4x4
    // (d(K-1)+1 = 3x(2-1)+1 = 4), therefore the output volume is 2x2x2
    // (with two output channels).
    armnn::TensorInfo outputDesc({ 1, 2, 2, 2, 2 }, ArmnnType);
    std::vector<T> outputData = QuantizedVector<T>(
        {
            -1124, 974,
            -1148, 978,

            -1244, 994,
            -1268, 998,

            -1724, 1074,
            -1748, 1078,

            -1844, 1094,
            -1868, 1098
        },
        qScale, qOffset);

    return SimpleConvolution3dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernel,
        GetBiasData<ArmnnBType>(biasEnabled, qScale * qScale, outputDesc, dataLayout),
        outputData,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        dataLayout,
        qScale,
        qOffset,
        1, // strideX
        1, // strideY
        1, // strideZ
        3, // dilationX
        3, // dilationY
        3  // dilationZ
    );
}
508
// "SAME"-style padded 3D convolution: a 5x5x5 single-channel input with a
// 3x3x3 kernel, unit strides and padding of 1 on every face, so the output
// volume stays 5x5x5.
template<armnn::DataType ArmnnType,
         armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 5> Convolution3dPaddingSame3x3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Type-appropriate quantization parameters (identity for float types).
    float qScale;
    int32_t qOffset;
    SetScaleOffset<ArmnnType>(qScale, qOffset);

    // NDHWC input: single batch/channel 5x5x5 volume holding the ramp 0..124.
    armnn::TensorInfo inputDesc({ 1, 5, 5, 5, 1 }, ArmnnType);
    std::vector<T> input = CreateQuantizedData<T>(125, qScale, qOffset);

    // Kernel layout is [D,H,W,I,O]: ones planes sandwiching an all-zero plane.
    armnn::TensorInfo kernelDesc({ 3, 3, 3, 1, 1 }, ArmnnType);
    std::vector<T> kernel = QuantizedVector<T>(
        {
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
        },
        qScale, qOffset);

    // Expected output WITHOUT bias; SimpleConvolution3dTestImpl folds the bias in.
    armnn::TensorInfo outputDesc({ 1, 5, 5, 5, 1 }, ArmnnType);
    std::vector<T> outputData = QuantizedVector<T>(
        {
            112, 171, 177, 183, 124,
            183, 279, 288, 297, 201,
            213, 324, 333, 342, 231,
            243, 369, 378, 387, 261,
            172, 261, 267, 273, 184,

            224, 342, 354, 366, 248,
            366, 558, 576, 594, 402,
            426, 648, 666, 684, 462,
            486, 738, 756, 774, 522,
            344, 522, 534, 546, 368,

            424, 642, 654, 666, 448,
            666, 1008, 1026, 1044, 702,
            726, 1098, 1116, 1134, 762,
            786, 1188, 1206, 1224, 822,
            544, 822, 834, 846, 568,
            624, 942, 954, 966, 648,

            966, 1458, 1476, 1494, 1002,
            1026, 1548, 1566, 1584, 1062,
            1086, 1638, 1656, 1674, 1122,
            744, 1122, 1134, 1146, 768,
            312, 471, 477, 483, 324,
            483, 729, 738, 747, 501,
            513, 774, 783, 792, 531,
            543, 819, 828, 837, 561,
            372, 561, 567, 573, 384
        },
        qScale, qOffset);

    return SimpleConvolution3dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernel,
        GetBiasData<ArmnnBType>(biasEnabled, qScale * qScale, outputDesc, dataLayout),
        outputData,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        dataLayout,
        qScale,
        qOffset,
        1, // strideX
        1, // strideY
        1, // strideZ
        1, // dilationX
        1, // dilationY
        1, // dilationZ
        1, // padLeft
        1, // padTop
        1, // padRight
        1, // padBottom
        1, // padFront
        1  // padBack
    );
}
605
// Float32-only test combining stride 3, dilation 2 and padding 1 per face:
// a 3x10x10 two-channel input with a 3x3x3 kernel (2 in / 2 out channels).
LayerTestResult<float, 5> Convolution3dStrideDilationPadding3x3x3TestCommonFloat32(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Float32: quantization parameters unused downstream (scale 0, offset 0).
    float qScale = 0.f;
    int32_t qOffset = 0;

    // NDHWC input: values i/100 for i = 0..599, keeping accumulations small.
    armnn::TensorInfo inputDesc({ 1, 3, 10, 10, 2 }, armnn::DataType::Float32);
    std::vector<float> input = CreateSmallQuantizedData<float>(600, 100.0f, qScale, qOffset);

    // Kernel layout [D,H,W,I,O]: values i/100 for i = 0..107.
    armnn::TensorInfo kernelDesc({ 3, 3, 3, 2, 2 }, armnn::DataType::Float32);
    std::vector<float> kernel = CreateSmallQuantizedData<float>(108, 100.0f, qScale, qOffset);

    // Since the dilation rate is 2 this will dilate the kernel to be 5x5: d(K-1)+1 --> 2 x (3-1) + 1 = 5,
    // therefore the output will be 1x4x4: (I − K + 2P)/S +1 => trunc((10 - 3 + 2x2 )/3 + 1))
    // where, dilation size = d = 2; kernel size = K = 3; input size = I = 10; padding size = P = 2; stride = S = 3
    // NOTE(review): the pad arguments below are 1 per side (total 2 per dimension);
    // verify this formula's constants against the Convolution3d shape inference.
    armnn::TensorInfo outputDesc({ 1, 1, 4, 4, 2 }, armnn::DataType::Float32);
    std::vector<float> outputData =
    {
        12.0312f, 12.2268f, 17.7512f, 18.0494f,
        18.176f,  18.4814f, 5.6912f,  5.7938f,
        19.1664f, 19.5078f, 28.119f,  28.6383f,
        28.6914f, 29.2215f, 8.9094f,  9.0873f,

        23.1264f, 23.5398f, 33.843f,  34.4703f,
        34.4154f, 35.0535f, 10.6734f, 10.8873f,
        6.2712f,  6.417f,   9.0718f,  9.2929f,
        9.2194f,  9.4441f,  2.7862f,  2.8615f
    };

    return SimpleConvolution3dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernel,
        GetBiasData<armnn::DataType::Float32>(biasEnabled, qScale * qScale, outputDesc, dataLayout),
        outputData,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        dataLayout,
        qScale,
        qOffset,
        3, // strideX
        3, // strideY
        3, // strideZ
        2, // dilationX
        2, // dilationY
        2, // dilationZ
        1, // padLeft
        1, // padTop
        1, // padRight
        1, // padBottom
        1, // padFront
        1  // padBack
    );
}
667
// Float32-only test with small fractional values: a 3x10x10 single-channel
// input and a mixed-sign 3x3x3 kernel with strides of 2 -> 1x4x4 output.
LayerTestResult<float, 5> Convolution3d2x2x2Stride3x3x3SmallTestCommonFloat32(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Float32: quantization parameters unused downstream (scale 0, offset 0).
    float qScale = 0.f;
    int32_t qOffset = 0;

    // NDHWC input: values i/100 for i = 0..299.
    armnn::TensorInfo inputDesc({ 1, 3, 10, 10, 1 }, armnn::DataType::Float32);
    std::vector<float> input = CreateSmallQuantizedData<float>(300, 100.0f, qScale, qOffset);

    // Kernel layout [D,H,W,I,O]; three 3x3 planes follow.
    armnn::TensorInfo kernelDesc({ 3, 3, 3, 1, 1 }, armnn::DataType::Float32);
    std::vector<float> kernel =
    {
         0.125977f,  0.150391f,  0.101562f,
         0.0585938f, 0.0864258f, 0.043457f,
         0.034668f,  0.0322266f, 0.0385742f,

         0.125977f,  0.150391f, -0.101562f,
        -0.0585938f,-0.0864258f,-0.043457f,
        -0.0104630f, 0.0154114f, 0.0013768f,

         0.0344238f, 0.035644f,  0.0495605f,
         0.0683594f, 0.099121f, -0.0461426f,
        -0.0996094f,-0.126953f,  -0.043457f,
    };

    // Expected output WITHOUT bias; SimpleConvolution3dTestImpl folds the bias in.
    armnn::TensorInfo outputDesc({ 1, 1, 4, 4, 1 }, armnn::DataType::Float32);
    std::vector<float> outputData =
    {
        -0.08156067f, -0.06891209f, -0.05589598f, -0.04310101f,
         0.04584253f,  0.05855697f,  0.07129729f,  0.08325434f,
         0.17304349f,  0.18521416f,  0.19818866f,  0.21096253f,
         0.29965734f,  0.312698f,    0.32547557f,  0.33818722f
    };

    return SimpleConvolution3dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernel,
        GetBiasData<armnn::DataType::Float32>(biasEnabled, qScale * qScale, outputDesc, dataLayout),
        outputData,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        dataLayout,
        qScale,
        qOffset,
        2, // strideX
        2, // strideY
        2  // strideZ
    );
}
725
// Float16-only test: a 2x3x3 two-channel input with a 2x2x2 kernel
// (2 in / 2 out channels), unit strides, no padding -> 1x2x2 output.
LayerTestResult<armnn::Half, 5> Convolution3d2x3x3TestCommonFloat16(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Enables the _h literal suffix for half-precision constants.
    using namespace half_float::literal;

    // Float16: quantization parameters unused downstream (scale 0, offset 0).
    float qScale = 0.f;
    int32_t qOffset = 0;

    // NDHWC input: 2x3x3 volume with two interleaved channels, values 1..36.
    armnn::TensorInfo inputDesc({ 1, 2, 3, 3, 2 }, armnn::DataType::Float16);
    const std::vector<armnn::Half> input =
    {
        1._h,  2._h,  3._h,
        4._h,  5._h,  6._h,

        7._h,  8._h,  9._h,
        10._h, 11._h, 12._h,

        13._h, 14._h, 15._h,
        16._h, 17._h, 18._h,

        19._h, 20._h, 21._h,
        22._h, 23._h, 24._h,

        25._h, 26._h, 27._h,
        28._h, 29._h, 30._h,

        31._h, 32._h, 33._h,
        34._h, 35._h, 36._h
    };

    // Kernel layout [D,H,W,I,O]: 2x2x2 spatial, 2 input and 2 output channels.
    armnn::TensorInfo kernelDesc({ 2, 2, 2, 2, 2 }, armnn::DataType::Float16);
    std::vector<armnn::Half> kernel =
    {
        -1._h, -1._h, -1._h, -1._h, -1._h, -1._h, -1._h, -1._h,
        -1._h, -1._h, -1._h,  1._h,  1._h,  1._h, -1._h, -1._h,
         1._h,  1._h, -1._h,  1._h, -1._h,  1._h, -1._h,  1._h,
        -1._h, -1._h, -1._h,  1._h, -1._h,  1._h, -1._h,  1._h,
    };

    // Expected output WITHOUT bias; SimpleConvolution3dTestImpl folds the bias in.
    armnn::TensorInfo outputDesc({ 1, 1, 2, 2, 2 }, armnn::DataType::Float16);
    std::vector<armnn::Half> outputData =
    {
        -176._h, 128._h,
        -200._h, 132._h,

        -248._h, 140._h,
        -272._h, 144._h
    };

    return SimpleConvolution3dTestImpl<armnn::DataType::Float16, armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernel,
        GetBiasData<armnn::DataType::Float16>(biasEnabled, qScale * qScale, outputDesc, dataLayout),
        outputData,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        dataLayout,
        qScale,
        qOffset
    );
}
795
// Float16-only test with small fractional values: a 2x4x4 single-channel
// input with a 2x2x2 kernel, unit strides, no padding -> 1x3x3 output.
LayerTestResult<armnn::Half, 5> Convolution3d2x2x2SmallTestCommonFloat16(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Enables the _h literal suffix for half-precision constants.
    using namespace half_float::literal;

    // Float16: quantization parameters unused downstream (scale 0, offset 0).
    float qScale = 0.f;
    int32_t qOffset = 0;

    // NDHWC input: single batch/channel 2x4x4 volume of small positive values.
    armnn::TensorInfo inputDesc({ 1, 2, 4, 4, 1 }, armnn::DataType::Float16);
    const std::vector<armnn::Half> input =
    {
        0.0367984_h, 0.0380895_h, 0.0420157_h,  0.0675631_h,
        0.0938920_h, 0.0476106_h, 0.1035490_h,  0.1260370_h,
        0.0461647_h, 0.0883828_h, 0.1159540_h,  0.0498519_h,
        0.0104630_h, 0.0154114_h, 0.00137681_h, 0.0344238_h,

        0.0356445_h, 0.0495605_h, 0.0683594_h,  0.0991211_h,
        0.0461426_h, 0.0996094_h, 0.1269530_h,  0.0393066_h,
        0.103516_h,  0.032544_h,  0.124334_h,   0.0564566_h,
        0.0123544_h, 0.0461647_h, 0.0883828_h,  0.1159540_h,
    };

    // Kernel layout [D,H,W,I,O]: 2x2x2 spatial, single input/output channel.
    armnn::TensorInfo kernelDesc({ 2, 2, 2, 1, 1 }, armnn::DataType::Float16);
    std::vector<armnn::Half> kernel =
    {
        -0.126184_h, -0.150468_h,
        -0.101412_h, -0.0586369_h,

        -0.0435089_h, 0.0347555_h,
         0.0323111_h, 0.0385381_h
    };

    // Expected output WITHOUT bias; SimpleConvolution3dTestImpl folds the bias in.
    armnn::TensorInfo outputDesc({ 1, 1, 3, 3, 1 }, armnn::DataType::Float16);
    std::vector<armnn::Half> outputData =
    {
        -0.01718917_h, -0.01370182_h, -0.02727737_h,

        -0.02282543_h, -0.03144084_h, -0.04468598_h,

        -0.02228982_h, -0.02244923_h, -0.02042268_h
    };

    return SimpleConvolution3dTestImpl<armnn::DataType::Float16, armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernel,
        GetBiasData<armnn::DataType::Float16>(biasEnabled, qScale * qScale, outputDesc, dataLayout),
        outputData,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        dataLayout,
        qScale,
        qOffset
    );
}
858
// Float32 instantiation of the simple 3x3x3 convolution test.
LayerTestResult<float, 5> SimpleConvolution3d3x3x3Float32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    return SimpleConvolution3d3x3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
869
// QAsymmS8 instantiation of the simple 3x3x3 convolution test (Signed32 bias).
LayerTestResult<int8_t, 5> SimpleConvolution3d3x3x3Int8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    return SimpleConvolution3d3x3x3TestCommon<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
        workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
880
// QAsymmU8 instantiation of the simple 3x3x3 convolution test (Signed32 bias).
LayerTestResult<uint8_t, 5> SimpleConvolution3d3x3x3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    return SimpleConvolution3d3x3x3TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
        workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
891
// QSymmS16 instantiation of the simple 3x3x3 convolution test (Signed32 bias).
LayerTestResult<int16_t, 5> SimpleConvolution3d3x3x3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    return SimpleConvolution3d3x3x3TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
        workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
902
903
// Float32 instantiation of the strided 3x5x5 convolution test.
LayerTestResult<float, 5> Convolution3d2x2x2Strides3x5x5Float32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    return Convolution3d2x2x2Strides3x5x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
914
// QAsymmS8 instantiation of the strided 3x5x5 convolution test (Signed32 bias).
LayerTestResult<int8_t, 5> Convolution3d2x2x2Strides3x5x5Int8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    return Convolution3d2x2x2Strides3x5x5TestCommon<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
        workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
925
// QAsymmU8 instantiation of the strided 3x5x5 convolution test (Signed32 bias).
LayerTestResult<uint8_t, 5> Convolution3d2x2x2Strides3x5x5Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    return Convolution3d2x2x2Strides3x5x5TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
        workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
936
937LayerTestResult<int16_t, 5> Convolution3d2x2x2Strides3x5x5Int16Test(
938 armnn::IWorkloadFactory& workloadFactory,
939 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
940 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100941 bool biasEnabled,
942 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100943{
944 return Convolution3d2x2x2Strides3x5x5TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100945 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100946}
947
948LayerTestResult<float, 5> Convolution3d2x2x2Dilation2x2x2Float32Test(
949 armnn::IWorkloadFactory& workloadFactory,
950 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
951 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100952 bool biasEnabled,
953 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100954{
955 return Convolution3d2x2x2Dilation2x2x2TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100956 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100957}
958
959LayerTestResult<int8_t, 5> Convolution3d2x2x2Dilation2x2x2Int8Test(
960 armnn::IWorkloadFactory& workloadFactory,
961 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
962 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100963 bool biasEnabled,
964 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100965{
966 return Convolution3d2x2x2Dilation2x2x2TestCommon<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100967 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100968}
969
970LayerTestResult<uint8_t, 5> Convolution3d2x2x2Dilation2x2x2Uint8Test(
971 armnn::IWorkloadFactory& workloadFactory,
972 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
973 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100974 bool biasEnabled,
975 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100976{
977 return Convolution3d2x2x2Dilation2x2x2TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100978 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100979}
980
981LayerTestResult<int16_t, 5> Convolution3d2x2x2Dilation2x2x2Int16Test(
982 armnn::IWorkloadFactory& workloadFactory,
983 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
984 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100985 bool biasEnabled,
986 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100987{
988 return Convolution3d2x2x2Dilation2x2x2TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100989 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100990}
991
992LayerTestResult<float, 5> Convolution3dPaddingSame3x3x3Float32Test(
993 armnn::IWorkloadFactory& workloadFactory,
994 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
995 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +0100996 bool biasEnabled,
997 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100998{
999 return Convolution3dPaddingSame3x3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001000 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001001}
1002
1003LayerTestResult<int8_t, 5> Convolution3dPaddingSame3x3x3Int8Test(
1004 armnn::IWorkloadFactory& workloadFactory,
1005 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1006 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001007 bool biasEnabled,
1008 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001009{
1010 return Convolution3dPaddingSame3x3x3TestCommon<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001011 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001012}
1013
1014LayerTestResult<uint8_t, 5> Convolution3dPaddingSame3x3x3Uint8Test(
1015 armnn::IWorkloadFactory& workloadFactory,
1016 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1017 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001018 bool biasEnabled,
1019 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001020{
1021 return Convolution3dPaddingSame3x3x3TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001022 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001023}
1024
1025LayerTestResult<int16_t, 5> Convolution3dPaddingSame3x3x3Int16Test(
1026 armnn::IWorkloadFactory& workloadFactory,
1027 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1028 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001029 bool biasEnabled,
1030 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001031{
1032 return Convolution3dPaddingSame3x3x3TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001033 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001034}
1035
1036LayerTestResult<float, 5> Convolution3dStrideDilationPadding3x3x3Float32Test(
1037 armnn::IWorkloadFactory& workloadFactory,
1038 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1039 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001040 bool biasEnabled,
1041 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001042{
1043 return Convolution3dStrideDilationPadding3x3x3TestCommonFloat32(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001044 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001045}
1046
1047LayerTestResult<float, 5> Convolution3d2x2x2Stride3x3x3SmallFloat32Test(
1048 armnn::IWorkloadFactory& workloadFactory,
1049 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1050 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001051 bool biasEnabled,
1052 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001053{
1054 return Convolution3d2x2x2Stride3x3x3SmallTestCommonFloat32(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001055 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001056}
1057
1058LayerTestResult<armnn::Half, 5> Convolution3d2x3x3Float16Test(
1059 armnn::IWorkloadFactory& workloadFactory,
1060 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1061 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001062 bool biasEnabled,
1063 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001064{
1065 return Convolution3d2x3x3TestCommonFloat16(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001066 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001067}
1068
1069LayerTestResult<armnn::Half, 5> Convolution3d2x2x2SmallFloat16Test(
1070 armnn::IWorkloadFactory& workloadFactory,
1071 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1072 const armnn::ITensorHandleFactory& tensorHandleFactory,
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001073 bool biasEnabled,
1074 armnn::DataLayout dataLayout)
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001075{
1076 return Convolution3d2x2x2SmallTestCommonFloat16(
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001077 workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001078}