blob: 813c623cff9fee3504310464ff6b264f79786516 [file] [log] [blame]
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +00001//
2// Copyright © 2019 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "TransposeConvolution2dTestImpl.hpp"
7
Matteo Martincighe011d202019-11-28 11:35:47 +00008#include <QuantizeHelper.hpp>
9
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +000010
Matteo Martincighe011d202019-11-28 11:35:47 +000011#include <armnnUtils/Permute.hpp>
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +000012
13#include <backendsCommon/CpuTensorHandle.hpp>
14
15#include <backendsCommon/test/DataLayoutUtils.hpp>
16#include <backendsCommon/test/TensorCopyUtils.hpp>
17#include <backendsCommon/test/WorkloadTestUtils.hpp>
18
19#include <reference/RefWorkloadFactory.hpp>
20
21#include <test/TensorHelpers.hpp>
22
23#include <boost/test/unit_test.hpp>
24
25#include <string>
26#include <utility>
27#include <vector>
28
29namespace
30{
31
// Pairs a tensor's metadata (shape, data type, quantization info) with the
// backing element data used to populate or read back the tensor.
template<typename T>
using TensorData = std::pair<armnn::TensorInfo, std::vector<T>>;
34
35template<typename T>
36void VerifyInputTensorData(const TensorData<T>& data, const std::string& tensorName)
37{
38 if (data.first.GetNumElements() > data.second.size())
39 {
40 throw armnn::InvalidArgumentException("Size of data too small for " + tensorName + ": expected " +
41 std::to_string(data.first.GetNumElements()) + "but got " + std::to_string(data.second.size()));
42 }
43}
44
45template<typename T, typename BT>
46void TransposeConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
47 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
48 const armnn::TransposeConvolution2dDescriptor& descriptor,
49 const TensorData<T>& input,
50 TensorData<T>& output,
51 const TensorData<T>& weights,
52 const armnn::Optional<TensorData<BT>>& biases)
53{
Jan Eilers8eb25602020-03-09 12:13:48 +000054 IgnoreUnused(memoryManager);
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +000055 using namespace armnn;
56
57 VerifyInputTensorData(input, "input");
58 VerifyInputTensorData(weights, "biases");
59
60 if (descriptor.m_BiasEnabled)
61 {
62 if (!biases.has_value())
63 {
64 throw InvalidArgumentException("Bias enabled but no bias data provided");
65 }
66 VerifyInputTensorData(biases.value(), "biases");
67 }
68
69 // set up weights
70 ScopedCpuTensorHandle weightsTensor(weights.first);
71
72 TransposeConvolution2dQueueDescriptor queueDescriptor;
73 queueDescriptor.m_Parameters = descriptor;
74 queueDescriptor.m_Weight = &weightsTensor;
75
76 AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.second.data());
77
78 std::unique_ptr<ScopedCpuTensorHandle> biasesTensor;
79 if (descriptor.m_BiasEnabled)
80 {
81 // set up biases
82 biasesTensor = std::make_unique<ScopedCpuTensorHandle>(biases.value().first);
83 queueDescriptor.m_Bias = biasesTensor.get();
84
85 AllocateAndCopyDataToITensorHandle(biasesTensor.get(), biases.value().second.data());
86 }
87
88 // set up input and output handles
89 std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(input.first);
90 std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(output.first);
91
92 // set up workload
93 armnn::WorkloadInfo workloadInfo;
94 AddInputToWorkload(queueDescriptor, workloadInfo, input.first, inputHandle.get());
95 AddOutputToWorkload(queueDescriptor, workloadInfo, output.first, outputHandle.get());
96
97 std::unique_ptr<armnn::IWorkload> workload =
98 workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);
99
100 inputHandle->Allocate();
101 outputHandle->Allocate();
102
103 CopyDataToITensorHandle(inputHandle.get(), input.second.data());
104
105 ExecuteWorkload(*workload, memoryManager);
106
107 // copy output
108 output.second = std::vector<T>(output.first.GetNumElements(), 0.0f);
109 CopyDataFromITensorHandle(output.second.data(), outputHandle.get());
110}
111
112template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
113LayerTestResult<T, 4> TransposeConvolution2dTest(
114 armnn::IWorkloadFactory& workloadFactory,
115 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
116 const armnn::TransposeConvolution2dDescriptor& descriptor,
117 armnn::TensorInfo& inputInfo,
118 const std::vector<float>& inputData,
119 armnn::TensorInfo& outputInfo,
120 const std::vector<float>& expectedOutputData,
121 armnn::TensorInfo& weightsInfo,
122 const std::vector<float>& weightsData,
123 armnn::TensorInfo& biasesInfo,
124 const std::vector<float>& biasesData)
125{
126 using namespace armnn;
127
128 // set up quantization parameters
129 if (armnn::IsQuantizedType<T>())
130 {
131 constexpr float qScale = 0.50f;
132 constexpr int32_t qOffset = 10;
133
134 inputInfo.SetQuantizationScale(qScale);
135 inputInfo.SetQuantizationOffset(qOffset);
136
137 outputInfo.SetQuantizationScale(qScale);
138 outputInfo.SetQuantizationOffset(qOffset);
139
140 weightsInfo.SetQuantizationScale(qScale);
141 weightsInfo.SetQuantizationOffset(qOffset);
142
143 biasesInfo.SetQuantizationScale(qScale * qScale);
144 biasesInfo.SetQuantizationOffset(0);
145 }
146
147 // set up input
148 TensorData<T> input =
149 {
150 inputInfo,
151 armnnUtils::QuantizedVector<T>(inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset())
152 };
153
154 // set up weights
155 TensorData<T> weights =
156 {
157 weightsInfo,
158 armnnUtils::QuantizedVector<T>(weightsData,
159 weightsInfo.GetQuantizationScale(),
160 weightsInfo.GetQuantizationOffset())
161 };
162
163 // set up biases
164 using BT = armnn::ResolveType<ArmnnBType>;
165 Optional<TensorData<BT>> optionalBiases;
166 if (descriptor.m_BiasEnabled)
167 {
168 TensorData<BT> biases =
169 {
170 biasesInfo,
171 armnnUtils::QuantizedVector<BT>(biasesData,
172 biasesInfo.GetQuantizationScale(),
173 biasesInfo.GetQuantizationOffset())
174 };
175
176 optionalBiases = Optional<TensorData<BT>>(biases);
177 }
178
179 // set up output
180 TensorData<T> output = { outputInfo, {} };
181
182 // execute test
183 TransposeConvolution2dTestImpl(workloadFactory,
184 memoryManager,
185 descriptor,
186 input,
187 output,
188 weights,
189 optionalBiases);
190
191 // construct result object
192 LayerTestResult<T, 4> testResult(outputInfo);
193 testResult.output = MakeTensor<T, 4>(outputInfo, output.second);
194 testResult.outputExpected = MakeTensor<T, 4>(outputInfo,
195 armnnUtils::QuantizedVector<T>(expectedOutputData,
196 outputInfo.GetQuantizationScale(),
197 outputInfo.GetQuantizationOffset()));
198
199 return testResult;
200}
201
202template<typename T>
203void SwizzleData(armnn::TensorInfo& inputInfo,
204 std::vector<T>& inputData,
205 armnn::TensorInfo& outputInfo,
206 std::vector<T>& outputData,
207 armnn::TensorInfo& weightsInfo,
208 std::vector<T>& weightsData)
209{
210 PermuteTensorNchwToNhwc<T>(inputInfo, inputData);
211 PermuteTensorNchwToNhwc<T>(outputInfo, outputData);
212 PermuteTensorNchwToNhwc<T>(weightsInfo, weightsData);
213}
214
215} // anonymous namespace
216
217template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
218LayerTestResult<T, 4> SimpleTransposeConvolution2dTest(
219 armnn::IWorkloadFactory& workloadFactory,
220 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
221 bool biasEnabled,
222 const armnn::DataLayout layout)
223{
224 using namespace armnn;
225
226 constexpr unsigned int batches = 1u;
227 constexpr unsigned int channels = 1u;
228
229 constexpr unsigned int wInput = 3u;
230 constexpr unsigned int hInput = wInput;
231
232 constexpr unsigned int wOutput = 5u;
233 constexpr unsigned int hOutput = wOutput;
234
235 constexpr unsigned int wWeights = 3u;
236 constexpr unsigned int hWeights = wWeights;
237
238 TensorShape inputShape = { batches, channels, hInput, wInput };
239 TensorShape outputShape = { batches, channels, hOutput, wOutput };
240 TensorShape weightsShape = { batches, channels, hWeights, wWeights };
241
242 TensorInfo inputInfo(inputShape, ArmnnType);
243 TensorInfo outputInfo(outputShape, ArmnnType);
244 TensorInfo weightsInfo(weightsShape, ArmnnType);
245 TensorInfo biasesInfo({ channels }, ArmnnBType);
246
247 std::vector<float> inputData =
248 {
249 1.f, 1.f, 1.f,
250 1.f, 1.f, 1.f,
251 1.f, 1.f, 1.f
252 };
253
254 std::vector<float> weightsData =
255 {
256 1.f, 2.f, 3.f,
257 4.f, 5.f, 6.f,
258 7.f, 8.f, 9.f
259 };
260
261 std::vector<float> biasesData = { 1.f };
262
263 std::vector<float> expectedOutputData =
264 {
265 1.f, 3.f, 6.f, 5.f, 3.f,
266 5.f, 12.f, 21.f, 16.f, 9.f,
267 12.f, 27.f, 45.f, 33.f, 18.f,
268 11.f, 24.f, 39.f, 28.f, 15.f,
269 7.f, 15.f, 24.f, 17.f, 9.f
270 };
271
272 if (biasEnabled)
273 {
274 // apply bias to expected output data
275 std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
276 [&](float f) -> float { return f + biasesData[0]; });
277 }
278
279 TransposeConvolution2dDescriptor descriptor;
280 descriptor.m_StrideX = 1;
281 descriptor.m_StrideY = 1;
282 descriptor.m_BiasEnabled = biasEnabled;
283 descriptor.m_DataLayout = layout;
284
285 // swizzle data if needed
286 if (layout == armnn::DataLayout::NHWC)
287 {
288 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
289 }
290
291 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
292 memoryManager,
293 descriptor,
294 inputInfo,
295 inputData,
296 outputInfo,
297 expectedOutputData,
298 weightsInfo,
299 weightsData,
300 biasesInfo,
301 biasesData);
302}
303
304template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
305LayerTestResult<T, 4> PaddedTransposeConvolution2dTest(
306 armnn::IWorkloadFactory& workloadFactory,
307 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
308 bool biasEnabled,
309 const armnn::DataLayout layout)
310{
311 using namespace armnn;
312
313 constexpr unsigned int batches = 1u;
314 constexpr unsigned int channels = 1u;
315
316 constexpr unsigned int wInput = 4u;
317 constexpr unsigned int hInput = wInput;
318
319 constexpr unsigned int wOutput = 2u;
320 constexpr unsigned int hOutput = wOutput;
321
322 constexpr unsigned int wWeights = 3u;
323 constexpr unsigned int hWeights = wWeights;
324
325 TensorShape inputShape = { batches, channels, hInput, wInput };
326 TensorShape outputShape = { batches, channels, hOutput, wOutput };
327 TensorShape weightsShape = { batches, channels, hWeights, wWeights };
328
329 TensorInfo inputInfo(inputShape, ArmnnType);
330 TensorInfo outputInfo(outputShape, ArmnnType);
331 TensorInfo weightsInfo(weightsShape, ArmnnType);
332 TensorInfo biasesInfo({ channels }, ArmnnBType);
333
334 std::vector<float> inputData =
335 {
336 1.f, 3.f, 2.f, 1.f,
337 1.f, 3.f, 3.f, 1.f,
338 2.f, 1.f, 1.f, 3.f,
339 3.f, 2.f, 3.f, 3.f
340 };
341
342 std::vector<float> weightsData =
343 {
344 1.f, 2.f, 3.f,
345 0.f, 1.f, 0.f,
346 2.f, 1.f, 2.f
347 };
348
349 std::vector<float> biasesData = { 1.f };
350
351 std::vector<float> expectedOutputData =
352 {
353 21.f, 21.f,
354 28.f, 27.f
355 };
356
357 if (biasEnabled)
358 {
359 // apply bias to expected output data
360 std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
361 [&](float f) -> float { return f + biasesData[0]; });
362 }
363
364 TransposeConvolution2dDescriptor descriptor;
365 descriptor.m_PadLeft = 2;
366 descriptor.m_PadRight = 2;
367 descriptor.m_PadTop = 2;
368 descriptor.m_PadBottom = 2;
369 descriptor.m_StrideX = 1;
370 descriptor.m_StrideY = 1;
371 descriptor.m_BiasEnabled = biasEnabled;
372 descriptor.m_DataLayout = layout;
373
374 // swizzle data if needed
375 if (layout == armnn::DataLayout::NHWC)
376 {
377 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
378 }
379
380 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
381 memoryManager,
382 descriptor,
383 inputInfo,
384 inputData,
385 outputInfo,
386 expectedOutputData,
387 weightsInfo,
388 weightsData,
389 biasesInfo,
390 biasesData);
391}
392
393template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
394LayerTestResult<T, 4> StridedTransposeConvolution2dTest(
395 armnn::IWorkloadFactory& workloadFactory,
396 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
397 bool biasEnabled,
398 const armnn::DataLayout layout)
399{
400 using namespace armnn;
401
402 constexpr unsigned int batches = 1u;
403 constexpr unsigned int channels = 1u;
404
405 constexpr unsigned int wInput = 3u;
406 constexpr unsigned int hInput = wInput;
407
408 constexpr unsigned int wOutput = 7u;
409 constexpr unsigned int hOutput = wOutput;
410
411 constexpr unsigned int wWeights = 3u;
412 constexpr unsigned int hWeights = wWeights;
413
414 TensorShape inputShape = { batches, channels, hInput, wInput };
415 TensorShape outputShape = { batches, channels, hOutput, wOutput };
416 TensorShape weightsShape = { batches, channels, hWeights, wWeights };
417
418 TensorInfo inputInfo(inputShape, ArmnnType);
419 TensorInfo outputInfo(outputShape, ArmnnType);
420 TensorInfo weightsInfo(weightsShape, ArmnnType);
421 TensorInfo biasesInfo({ channels }, ArmnnBType);
422
423 std::vector<float> inputData =
424 {
425 1.f, 1.f, 1.f,
426 1.f, 1.f, 1.f,
427 1.f, 1.f, 1.f
428 };
429
430 std::vector<float> weightsData =
431 {
432 1.f, 2.f, 3.f,
433 4.f, 5.f, 6.f,
434 7.f, 8.f, 9.f
435 };
436
437 std::vector<float> biasesData = { 1.f };
438
439 std::vector<float> expectedOutputData =
440 {
441 1.f, 2.f, 4.f, 2.f, 4.f, 2.f, 3.f,
442 4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
443 8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
444 4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
445 8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
446 4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
447 7.f, 8.f, 16.f, 8.f, 16.f, 8.f, 9.f
448 };
449
450 if (biasEnabled)
451 {
452 // apply bias to expected output data
453 std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
454 [&](float f) -> float { return f + biasesData[0]; });
455 }
456
457 TransposeConvolution2dDescriptor descriptor;
458 descriptor.m_StrideX = 2;
459 descriptor.m_StrideY = 2;
460 descriptor.m_BiasEnabled = biasEnabled;
461 descriptor.m_DataLayout = layout;
462
463 // swizzle data if needed
464 if (layout == armnn::DataLayout::NHWC)
465 {
466 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
467 }
468
469 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
470 memoryManager,
471 descriptor,
472 inputInfo,
473 inputData,
474 outputInfo,
475 expectedOutputData,
476 weightsInfo,
477 weightsData,
478 biasesInfo,
479 biasesData);
480}
481
482template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
483LayerTestResult<T, 4> MultiChannelTransposeConvolution2dTest(
484 armnn::IWorkloadFactory& workloadFactory,
485 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
486 const armnn::DataLayout layout)
487{
488 using namespace armnn;
489
490 TensorShape inputShape = { 1, 1, 2, 2 };
491 TensorShape outputShape = { 1, 2, 5, 5 };
492
493 // OIHW for NCHW; OHWI for NHWC
494 TensorShape weightsShape = { 2, 1, 3, 3 };
495 TensorShape biasesShape = { 2 };
496
497 TensorInfo inputInfo(inputShape, ArmnnType);
498 TensorInfo outputInfo(outputShape, ArmnnType);
499 TensorInfo weightsInfo(weightsShape, ArmnnType);
500 TensorInfo biasesInfo(biasesShape, ArmnnBType);
501
502 std::vector<float> inputData =
503 {
504 1.f, 2.f,
505 3.f, 4.f,
506 };
507
508 std::vector<float> weightsData =
509 {
510 1.f, 3.f, 5.f,
511 7.f, 9.f, 11.f,
512 13.f, 15.f, 17.f,
513
514 2.f, 4.f, 6.f,
515 8.f, 10.f, 12.f,
516 14.f, 16.f, 18.f
517 };
518
519 std::vector<float> biasesData = { -1.5f, -2.0f };
520
521 std::vector<float> expectedOutputData =
522 {
523 -0.5f, 1.5f, 5.5f, 4.5f, 8.5f,
524 5.5f, 7.5f, 23.5f, 16.5f, 20.5f,
525 14.5f, 22.5f, 60.5f, 40.5f, 52.5f,
526 19.5f, 25.5f, 59.5f, 34.5f, 42.5f,
527 37.5f, 43.5f, 101.5f, 58.5f, 66.5f,
528
529 0.0f, 2.0f, 8.0f, 6.0f, 10.0f,
530 6.0f, 8.0f, 26.0f, 18.0f, 22.0f,
531 18.0f, 26.0f, 70.0f, 46.0f, 58.0f,
532 22.0f, 28.0f, 66.0f, 38.0f, 46.0f,
533 40.0f, 46.0f, 108.0f, 62.0f, 70.0f
534 };
535
536 TransposeConvolution2dDescriptor descriptor;
537 descriptor.m_StrideX = 2;
538 descriptor.m_StrideY = 2;
539 descriptor.m_BiasEnabled = true;
540 descriptor.m_DataLayout = layout;
541
542 // swizzle data if needed
543 if (layout == armnn::DataLayout::NHWC)
544 {
545 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
546 }
547
548 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
549 memoryManager,
550 descriptor,
551 inputInfo,
552 inputData,
553 outputInfo,
554 expectedOutputData,
555 weightsInfo,
556 weightsData,
557 biasesInfo,
558 biasesData);
559}
560
// Tests TransposeConvolution2d with per-axis (per-output-channel) quantized
// weights: QAsymmU8 input/output, QSymmS8 kernel with one scale per output
// channel, and Signed32 biases. Unlike the templated tests above, this builds
// and runs the workload directly so that the per-axis TensorInfo constructors
// can be used.
LayerTestResult<uint8_t, 4> TransposeConvolution2dPerAxisQuantTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    const DataType inputType  = DataType::QAsymmU8;
    const DataType kernelType = DataType::QSymmS8;
    const DataType biasType   = DataType::Signed32;

    TensorInfo inputInfo ({ 1, 1, 2, 2 }, inputType, 0.50f, 10);
    TensorInfo outputInfo({ 1, 2, 5, 5 }, inputType, 0.50f, 10);

    // one quantization scale per output channel (dimension 0 of the kernel)
    const std::vector<float> quantScales{ 0.25f, 0.5f };
    constexpr unsigned int quantDimension = 0;

    TensorInfo kernelInfo({ 2, 1, 3, 3 }, kernelType, quantScales, quantDimension);

    // bias scales are inputScale (0.50) * kernelScale per channel
    const std::vector<float> biasQuantScales{ 0.125f, 0.25f };
    TensorInfo biasInfo({ 2 }, biasType, biasQuantScales, quantDimension);

    std::vector<uint8_t> inputData =
    {
        12, 14,
        16, 18
    };

    std::vector<int8_t> kernelData =
    {
        4, 12, 20,
        28, 36, 44,
        52, 60, 68,

        4, 8, 12,
        16, 20, 24,
        28, 32, 36
    };

    std::vector<int32_t> biasData = { -12, -8 };

    std::vector<uint8_t> expectedOutputData =
    {
        9, 13, 21, 19, 27,
        21, 25, 57, 43, 51,
        39, 55, 131, 91, 115,
        49, 61, 129, 79, 95,
        85, 97, 213, 127, 143,

        10, 14, 26, 22, 30,
        22, 26, 62, 46, 54,
        46, 62, 150, 102, 126,
        54, 66, 142, 86, 102,
        90, 102, 226, 134, 150
    };

    // reference data above is NCHW; permute in place when testing NHWC
    // (bias is rank 1 and layout independent, so it is not permuted)
    if (layout == DataLayout::NHWC)
    {
        PermuteTensorNchwToNhwc(inputInfo, inputData);
        PermuteTensorNchwToNhwc(kernelInfo, kernelData);
        PermuteTensorNchwToNhwc(outputInfo, expectedOutputData);
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout = layout;

    std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);

    // weights and bias are held in scoped CPU tensors and referenced from the
    // queue descriptor
    WorkloadInfo workloadInfo;
    ScopedCpuTensorHandle weightTensor(kernelInfo);
    ScopedCpuTensorHandle biasTensor(biasInfo);

    AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
    AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());

    TransposeConvolution2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Weight = &weightTensor;
    queueDescriptor.m_Bias = &biasTensor;

    // inputs/outputs must be registered before the workload is created
    AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());

    std::unique_ptr<IWorkload> workload = workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    ExecuteWorkload(*workload, memoryManager);

    // read the computed tensor back and attach the reference output
    LayerTestResult<uint8_t, 4> ret(outputInfo);
    CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
    ret.outputExpected = MakeTensor<uint8_t, 4>(outputInfo, expectedOutputData);

    return ret;
}
662
//
// Explicit template specializations
//

// SimpleTransposeConvolution2dTest: Float32 and quantized variants.
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

// PaddedTransposeConvolution2dTest: Float32 and quantized variants.
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

// StridedTransposeConvolution2dTest: Float32 and quantized variants.
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

// MultiChannelTransposeConvolution2dTest: Float32 and quantized variants
// (bias is always enabled, so there is no biasEnabled parameter).
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout);