//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "TransposeConvolution2dTestImpl.hpp"

#include <armnn/ArmNN.hpp>

#include <Permute.hpp>
#include <QuantizeHelper.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>

#include <backendsCommon/test/DataLayoutUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <reference/RefWorkloadFactory.hpp>

#include <test/TensorHelpers.hpp>

#include <boost/test/unit_test.hpp>

#include <string>
#include <utility>
#include <vector>

namespace
{

template<typename T>
using TensorData = std::pair<armnn::TensorInfo, std::vector<T>>;

template<typename T>
void VerifyInputTensorData(const TensorData<T>& data, const std::string& tensorName)
{
    if (data.first.GetNumElements() > data.second.size())
    {
        throw armnn::InvalidArgumentException("Size of data too small for " + tensorName + ": expected " +
            std::to_string(data.first.GetNumElements()) + " but got " + std::to_string(data.second.size()));
    }
}

template<typename T, typename BT>
void TransposeConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                    const armnn::TransposeConvolution2dDescriptor& descriptor,
                                    const TensorData<T>& input,
                                    TensorData<T>& output,
                                    const TensorData<T>& weights,
                                    const armnn::Optional<TensorData<BT>>& biases)
{
    using namespace armnn;

    VerifyInputTensorData(input, "input");
    VerifyInputTensorData(weights, "weights");

    if (descriptor.m_BiasEnabled)
    {
        if (!biases.has_value())
        {
            throw InvalidArgumentException("Bias enabled but no bias data provided");
        }
        VerifyInputTensorData(biases.value(), "biases");
    }

    // set up weights
    ScopedCpuTensorHandle weightsTensor(weights.first);

    TransposeConvolution2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Weight = &weightsTensor;

    AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.second.data());

    std::unique_ptr<ScopedCpuTensorHandle> biasesTensor;
    if (descriptor.m_BiasEnabled)
    {
        // set up biases
        biasesTensor = std::make_unique<ScopedCpuTensorHandle>(biases.value().first);
        queueDescriptor.m_Bias = biasesTensor.get();

        AllocateAndCopyDataToITensorHandle(biasesTensor.get(), biases.value().second.data());
    }

    // set up input and output handles
    std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(input.first);
    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(output.first);

    // set up workload
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, input.first, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, output.first, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload =
        workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.second.data());

    ExecuteWorkload(*workload, memoryManager);

    // copy output
    output.second = std::vector<T>(output.first.GetNumElements(), 0.0f);
    CopyDataFromITensorHandle(output.second.data(), outputHandle.get());
}

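// For reference, transpose convolution is expected to satisfy the usual output-size
// relation. A minimal sketch (illustrative only, not used by the tests below):
//
//     constexpr unsigned int TransposeConvOutputSize(unsigned int inputSize,
//                                                    unsigned int kernelSize,
//                                                    unsigned int stride,
//                                                    unsigned int padLeft,
//                                                    unsigned int padRight)
//     {
//         return (inputSize - 1) * stride + kernelSize - padLeft - padRight;
//     }
//
// e.g. SimpleTransposeConvolution2dTest below: (3 - 1) * 1 + 3 - 0 - 0 = 5, hence its 5x5 output.
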
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> TransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TransposeConvolution2dDescriptor& descriptor,
    armnn::TensorInfo& inputInfo,
    const std::vector<float>& inputData,
    armnn::TensorInfo& outputInfo,
    const std::vector<float>& expectedOutputData,
    armnn::TensorInfo& weightsInfo,
    const std::vector<float>& weightsData,
    armnn::TensorInfo& biasesInfo,
    const std::vector<float>& biasesData)
{
    using namespace armnn;

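    // Note: the bias scale below follows the common convention for quantized convolution,
    // biasScale = inputScale * weightsScale (with zero offset); here 0.5 * 0.5 = 0.25.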
    // set up quantization parameters
    if (armnn::IsQuantizedType<T>())
    {
        constexpr float qScale = 0.50f;
        constexpr int32_t qOffset = 10;

        inputInfo.SetQuantizationScale(qScale);
        inputInfo.SetQuantizationOffset(qOffset);

        outputInfo.SetQuantizationScale(qScale);
        outputInfo.SetQuantizationOffset(qOffset);

        weightsInfo.SetQuantizationScale(qScale);
        weightsInfo.SetQuantizationOffset(qOffset);

        biasesInfo.SetQuantizationScale(qScale * qScale);
        biasesInfo.SetQuantizationOffset(0);
    }

    // set up input
    TensorData<T> input =
    {
        inputInfo,
        armnnUtils::QuantizedVector<T>(inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset())
    };

    // set up weights
    TensorData<T> weights =
    {
        weightsInfo,
        armnnUtils::QuantizedVector<T>(weightsData,
                                       weightsInfo.GetQuantizationScale(),
                                       weightsInfo.GetQuantizationOffset())
    };

    // set up biases
    using BT = armnn::ResolveType<ArmnnBType>;
    Optional<TensorData<BT>> optionalBiases;
    if (descriptor.m_BiasEnabled)
    {
        TensorData<BT> biases =
        {
            biasesInfo,
            armnnUtils::QuantizedVector<BT>(biasesData,
                                            biasesInfo.GetQuantizationScale(),
                                            biasesInfo.GetQuantizationOffset())
        };

        optionalBiases = Optional<TensorData<BT>>(biases);
    }

    // set up output
    TensorData<T> output = { outputInfo, {} };

    // execute test
    TransposeConvolution2dTestImpl(workloadFactory,
                                   memoryManager,
                                   descriptor,
                                   input,
                                   output,
                                   weights,
                                   optionalBiases);

    // construct result object
    LayerTestResult<T, 4> testResult(outputInfo);
    testResult.output = MakeTensor<T, 4>(outputInfo, output.second);
    testResult.outputExpected = MakeTensor<T, 4>(outputInfo,
                                                 armnnUtils::QuantizedVector<T>(expectedOutputData,
                                                                                outputInfo.GetQuantizationScale(),
                                                                                outputInfo.GetQuantizationOffset()));

    return testResult;
}

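// Permutes NCHW-shaped data and tensor info to NHWC, so the same reference values can
// drive both data layouts; e.g. a { 1, 2, 5, 5 } (NCHW) tensor becomes { 1, 5, 5, 2 } (NHWC).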
template<typename T>
void SwizzleData(armnn::TensorInfo& inputInfo,
                 std::vector<T>& inputData,
                 armnn::TensorInfo& outputInfo,
                 std::vector<T>& outputData,
                 armnn::TensorInfo& weightsInfo,
                 std::vector<T>& weightsData)
{
    PermuteTensorNchwToNhwc<T>(inputInfo, inputData);
    PermuteTensorNchwToNhwc<T>(outputInfo, outputData);
    PermuteTensorNchwToNhwc<T>(weightsInfo, weightsData);
}

} // anonymous namespace

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> SimpleTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    constexpr unsigned int batches = 1u;
    constexpr unsigned int channels = 1u;

    constexpr unsigned int wInput = 3u;
    constexpr unsigned int hInput = wInput;

    constexpr unsigned int wOutput = 5u;
    constexpr unsigned int hOutput = wOutput;

    constexpr unsigned int wWeights = 3u;
    constexpr unsigned int hWeights = wWeights;

    TensorShape inputShape = { batches, channels, hInput, wInput };
    TensorShape outputShape = { batches, channels, hOutput, wOutput };
    TensorShape weightsShape = { batches, channels, hWeights, wWeights };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo({ channels }, ArmnnBType);

    std::vector<float> inputData =
    {
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f
    };

    std::vector<float> weightsData =
    {
        1.f, 2.f, 3.f,
        4.f, 5.f, 6.f,
        7.f, 8.f, 9.f
    };

    std::vector<float> biasesData = { 1.f };

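    // With a 3x3 all-ones input and stride 1, each expected output element is simply the
    // sum of the kernel weights covering it; the fully overlapped centre is 1+2+...+9 = 45.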
    std::vector<float> expectedOutputData =
    {
        1.f, 3.f, 6.f, 5.f, 3.f,
        5.f, 12.f, 21.f, 16.f, 9.f,
        12.f, 27.f, 45.f, 33.f, 18.f,
        11.f, 24.f, 39.f, 28.f, 15.f,
        7.f, 15.f, 24.f, 17.f, 9.f
    };

    if (biasEnabled)
    {
        // apply bias to expected output data
        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
                       [&](float f) -> float { return f + biasesData[0]; });
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX = 1;
    descriptor.m_StrideY = 1;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout = layout;

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> PaddedTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    constexpr unsigned int batches = 1u;
    constexpr unsigned int channels = 1u;

    constexpr unsigned int wInput = 4u;
    constexpr unsigned int hInput = wInput;

    constexpr unsigned int wOutput = 2u;
    constexpr unsigned int hOutput = wOutput;

    constexpr unsigned int wWeights = 3u;
    constexpr unsigned int hWeights = wWeights;

    TensorShape inputShape = { batches, channels, hInput, wInput };
    TensorShape outputShape = { batches, channels, hOutput, wOutput };
    TensorShape weightsShape = { batches, channels, hWeights, wWeights };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo({ channels }, ArmnnBType);

    std::vector<float> inputData =
    {
        1.f, 3.f, 2.f, 1.f,
        1.f, 3.f, 3.f, 1.f,
        2.f, 1.f, 1.f, 3.f,
        3.f, 2.f, 3.f, 3.f
    };

    std::vector<float> weightsData =
    {
        1.f, 2.f, 3.f,
        0.f, 1.f, 0.f,
        2.f, 1.f, 2.f
    };

    std::vector<float> biasesData = { 1.f };

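    // Padding of 2 on every side trims the full (4 - 1) * 1 + 3 = 6 result down to a
    // 2x2 output: 6 - 2 - 2 = 2 along each spatial dimension.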
    std::vector<float> expectedOutputData =
    {
        21.f, 21.f,
        28.f, 27.f
    };

    if (biasEnabled)
    {
        // apply bias to expected output data
        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
                       [&](float f) -> float { return f + biasesData[0]; });
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_PadLeft = 2;
    descriptor.m_PadRight = 2;
    descriptor.m_PadTop = 2;
    descriptor.m_PadBottom = 2;
    descriptor.m_StrideX = 1;
    descriptor.m_StrideY = 1;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout = layout;

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> StridedTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    constexpr unsigned int batches = 1u;
    constexpr unsigned int channels = 1u;

    constexpr unsigned int wInput = 3u;
    constexpr unsigned int hInput = wInput;

    constexpr unsigned int wOutput = 7u;
    constexpr unsigned int hOutput = wOutput;

    constexpr unsigned int wWeights = 3u;
    constexpr unsigned int hWeights = wWeights;

    TensorShape inputShape = { batches, channels, hInput, wInput };
    TensorShape outputShape = { batches, channels, hOutput, wOutput };
    TensorShape weightsShape = { batches, channels, hWeights, wWeights };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo({ channels }, ArmnnBType);

    std::vector<float> inputData =
    {
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f
    };

    std::vector<float> weightsData =
    {
        1.f, 2.f, 3.f,
        4.f, 5.f, 6.f,
        7.f, 8.f, 9.f
    };

    std::vector<float> biasesData = { 1.f };

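    // Stride 2 spreads the 3x3 input over a (3 - 1) * 2 + 3 = 7 wide/tall output. Where
    // the corners of four neighbouring kernel copies overlap, the all-ones input yields
    // 1 + 3 + 7 + 9 = 20, the interior peaks below.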
    std::vector<float> expectedOutputData =
    {
        1.f, 2.f, 4.f, 2.f, 4.f, 2.f, 3.f,
        4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
        8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
        4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
        8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
        4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
        7.f, 8.f, 16.f, 8.f, 16.f, 8.f, 9.f
    };

    if (biasEnabled)
    {
        // apply bias to expected output data
        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
                       [&](float f) -> float { return f + biasesData[0]; });
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout = layout;

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> MultiChannelTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    TensorShape inputShape = { 1, 1, 2, 2 };
    TensorShape outputShape = { 1, 2, 5, 5 };

    // OIHW for NCHW; OHWI for NHWC
    TensorShape weightsShape = { 2, 1, 3, 3 };
    TensorShape biasesShape = { 2 };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo(biasesShape, ArmnnBType);

    std::vector<float> inputData =
    {
        1.f, 2.f,
        3.f, 4.f
    };

    std::vector<float> weightsData =
    {
        1.f, 3.f, 5.f,
        7.f, 9.f, 11.f,
        13.f, 15.f, 17.f,

        2.f, 4.f, 6.f,
        8.f, 10.f, 12.f,
        14.f, 16.f, 18.f
    };

    std::vector<float> biasesData = { -1.5f, -2.0f };

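    // Each of the two OIHW filters produces one 5x5 output channel ((2 - 1) * 2 + 3 = 5).
    // Spot check: channel 0, top-left = 1 * 1 + bias(-1.5) = -0.5; channel 1 = 1 * 2 + (-2.0) = 0.0.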
    std::vector<float> expectedOutputData =
    {
        -0.5f, 1.5f, 5.5f, 4.5f, 8.5f,
        5.5f, 7.5f, 23.5f, 16.5f, 20.5f,
        14.5f, 22.5f, 60.5f, 40.5f, 52.5f,
        19.5f, 25.5f, 59.5f, 34.5f, 42.5f,
        37.5f, 43.5f, 101.5f, 58.5f, 66.5f,

        0.0f, 2.0f, 8.0f, 6.0f, 10.0f,
        6.0f, 8.0f, 26.0f, 18.0f, 22.0f,
        18.0f, 26.0f, 70.0f, 46.0f, 58.0f,
        22.0f, 28.0f, 66.0f, 38.0f, 46.0f,
        40.0f, 46.0f, 108.0f, 62.0f, 70.0f
    };

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout = layout;

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}

LayerTestResult<uint8_t, 4> TransposeConvolution2dPerAxisQuantTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    const DataType inputType = DataType::QuantisedAsymm8;
    const DataType kernelType = DataType::QuantizedSymm8PerAxis;
    const DataType biasType = DataType::Signed32;

    TensorInfo inputInfo ({ 1, 1, 2, 2 }, inputType, 0.50f, 10);
    TensorInfo outputInfo({ 1, 2, 5, 5 }, inputType, 0.50f, 10);

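    // Per-axis quantization: the kernel carries one scale per output channel (dimension 0
    // of its OIHW shape), and each bias scale follows the inputScale * kernelScale
    // convention: 0.5 * 0.25 = 0.125 and 0.5 * 0.5 = 0.25.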
    const std::vector<float> quantScales{ 0.25f, 0.5f };
    constexpr unsigned int quantDimension = 0;

    TensorInfo kernelInfo({ 2, 1, 3, 3 }, kernelType, quantScales, quantDimension);

    const std::vector<float> biasQuantScales{ 0.125f, 0.25f };
    TensorInfo biasInfo({ 2 }, biasType, biasQuantScales, quantDimension);

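    // The quantized values below encode the same case as MultiChannelTransposeConvolution2dTest:
    // e.g. input (12 - 10) * 0.5 = 1.0, kernel 4 * 0.25 = 1.0, bias -12 * 0.125 = -1.5.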
    std::vector<uint8_t> inputData =
    {
        12, 14,
        16, 18
    };

    std::vector<int8_t> kernelData =
    {
        4, 12, 20,
        28, 36, 44,
        52, 60, 68,

        4, 8, 12,
        16, 20, 24,
        28, 32, 36
    };

    std::vector<int32_t> biasData = { -12, -8 };

    std::vector<uint8_t> expectedOutputData =
    {
        9, 13, 21, 19, 27,
        21, 25, 57, 43, 51,
        39, 55, 131, 91, 115,
        49, 61, 129, 79, 95,
        85, 97, 213, 127, 143,

        10, 14, 26, 22, 30,
        22, 26, 62, 46, 54,
        46, 62, 150, 102, 126,
        54, 66, 142, 86, 102,
        90, 102, 226, 134, 150
    };

    if (layout == DataLayout::NHWC)
    {
        PermuteTensorNchwToNhwc(inputInfo, inputData);
        PermuteTensorNchwToNhwc(kernelInfo, kernelData);
        PermuteTensorNchwToNhwc(outputInfo, expectedOutputData);
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout = layout;

    std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);

    WorkloadInfo workloadInfo;
    ScopedCpuTensorHandle weightTensor(kernelInfo);
    ScopedCpuTensorHandle biasTensor(biasInfo);

    AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
    AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());

    TransposeConvolution2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Weight = &weightTensor;
    queueDescriptor.m_Bias = &biasTensor;

    AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());

    std::unique_ptr<IWorkload> workload = workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    ExecuteWorkload(*workload, memoryManager);

    LayerTestResult<uint8_t, 4> ret(outputInfo);
    CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
    ret.outputExpected = MakeTensor<uint8_t, 4>(outputInfo, expectedOutputData);

    return ret;
}

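// A hypothetical usage sketch (names illustrative, assuming a Boost.Test backend suite
// and the CompareTensors helper from TensorHelpers.hpp):
//
//     BOOST_AUTO_TEST_CASE(SimpleTransposeConvolution2dFloat32Nchw)
//     {
//         armnn::RefWorkloadFactory factory;
//         auto result = SimpleTransposeConvolution2dTest<armnn::DataType::Float32,
//                                                        armnn::DataType::Float32>(
//             factory, nullptr, true, armnn::DataLayout::NCHW);
//         BOOST_TEST(CompareTensors(result.output, result.outputExpected));
//     }
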
//
// Explicit template instantiations
//

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout);