blob: d5a045eec71d0b348cdf0bbc9d604580bef9d5df [file] [log] [blame]
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +00001//
Teresa Charlinfbf0e5b2020-08-17 01:01:06 +01002// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +00003// SPDX-License-Identifier: MIT
4//
5
6#include "TransposeConvolution2dTestImpl.hpp"
7
Matteo Martincighe011d202019-11-28 11:35:47 +00008#include <QuantizeHelper.hpp>
9
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +000010
Matteo Martincighe011d202019-11-28 11:35:47 +000011#include <armnnUtils/Permute.hpp>
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +000012
13#include <backendsCommon/CpuTensorHandle.hpp>
14
15#include <backendsCommon/test/DataLayoutUtils.hpp>
16#include <backendsCommon/test/TensorCopyUtils.hpp>
17#include <backendsCommon/test/WorkloadTestUtils.hpp>
18
19#include <reference/RefWorkloadFactory.hpp>
20
21#include <test/TensorHelpers.hpp>
22
23#include <boost/test/unit_test.hpp>
24
25#include <string>
26#include <utility>
27#include <vector>
28
29namespace
30{
31
// Pairs a tensor's metadata (shape, data type, quantization parameters) with
// its element data so both travel together through the test helpers below.
template<typename T>
using TensorData = std::pair<armnn::TensorInfo, std::vector<T>>;
34
35template<typename T>
36void VerifyInputTensorData(const TensorData<T>& data, const std::string& tensorName)
37{
38 if (data.first.GetNumElements() > data.second.size())
39 {
40 throw armnn::InvalidArgumentException("Size of data too small for " + tensorName + ": expected " +
41 std::to_string(data.first.GetNumElements()) + "but got " + std::to_string(data.second.size()));
42 }
43}
44
45template<typename T, typename BT>
46void TransposeConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
47 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
48 const armnn::TransposeConvolution2dDescriptor& descriptor,
49 const TensorData<T>& input,
50 TensorData<T>& output,
51 const TensorData<T>& weights,
52 const armnn::Optional<TensorData<BT>>& biases)
53{
Jan Eilers8eb25602020-03-09 12:13:48 +000054 IgnoreUnused(memoryManager);
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +000055 using namespace armnn;
56
57 VerifyInputTensorData(input, "input");
58 VerifyInputTensorData(weights, "biases");
59
60 if (descriptor.m_BiasEnabled)
61 {
62 if (!biases.has_value())
63 {
64 throw InvalidArgumentException("Bias enabled but no bias data provided");
65 }
66 VerifyInputTensorData(biases.value(), "biases");
67 }
68
69 // set up weights
70 ScopedCpuTensorHandle weightsTensor(weights.first);
71
72 TransposeConvolution2dQueueDescriptor queueDescriptor;
73 queueDescriptor.m_Parameters = descriptor;
74 queueDescriptor.m_Weight = &weightsTensor;
75
76 AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.second.data());
77
78 std::unique_ptr<ScopedCpuTensorHandle> biasesTensor;
79 if (descriptor.m_BiasEnabled)
80 {
81 // set up biases
82 biasesTensor = std::make_unique<ScopedCpuTensorHandle>(biases.value().first);
83 queueDescriptor.m_Bias = biasesTensor.get();
84
85 AllocateAndCopyDataToITensorHandle(biasesTensor.get(), biases.value().second.data());
86 }
87
Teresa Charlinfbf0e5b2020-08-17 01:01:06 +010088 ARMNN_NO_DEPRECATE_WARN_BEGIN
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +000089 // set up input and output handles
90 std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(input.first);
91 std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(output.first);
Teresa Charlinfbf0e5b2020-08-17 01:01:06 +010092 ARMNN_NO_DEPRECATE_WARN_END
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +000093
94 // set up workload
95 armnn::WorkloadInfo workloadInfo;
96 AddInputToWorkload(queueDescriptor, workloadInfo, input.first, inputHandle.get());
97 AddOutputToWorkload(queueDescriptor, workloadInfo, output.first, outputHandle.get());
98
99 std::unique_ptr<armnn::IWorkload> workload =
100 workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);
101
102 inputHandle->Allocate();
103 outputHandle->Allocate();
104
105 CopyDataToITensorHandle(inputHandle.get(), input.second.data());
106
107 ExecuteWorkload(*workload, memoryManager);
108
109 // copy output
Rob Hughesbb46dde2020-05-20 15:27:37 +0100110 output.second = std::vector<T>(output.first.GetNumElements(), T());
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +0000111 CopyDataFromITensorHandle(output.second.data(), outputHandle.get());
112}
113
// Shared driver for the transpose convolution tests below: quantizes the
// float reference data for the target data type, executes the layer via
// TransposeConvolution2dTestImpl, and packages actual vs expected output in
// a LayerTestResult. The TensorInfo parameters are taken by non-const
// reference because quantization parameters are written onto them here.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> TransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TransposeConvolution2dDescriptor& descriptor,
    armnn::TensorInfo& inputInfo,
    const std::vector<float>& inputData,
    armnn::TensorInfo& outputInfo,
    const std::vector<float>& expectedOutputData,
    armnn::TensorInfo& weightsInfo,
    const std::vector<float>& weightsData,
    armnn::TensorInfo& biasesInfo,
    const std::vector<float>& biasesData)
{
    using namespace armnn;

    // set up quantization parameters (only for quantized data types)
    if (armnn::IsQuantizedType<T>())
    {
        constexpr float qScale = 0.50f;
        constexpr int32_t qOffset = 10;

        inputInfo.SetQuantizationScale(qScale);
        inputInfo.SetQuantizationOffset(qOffset);

        outputInfo.SetQuantizationScale(qScale);
        outputInfo.SetQuantizationOffset(qOffset);

        weightsInfo.SetQuantizationScale(qScale);
        weightsInfo.SetQuantizationOffset(qOffset);

        // bias scale is input scale * weights scale, with zero offset
        biasesInfo.SetQuantizationScale(qScale * qScale);
        biasesInfo.SetQuantizationOffset(0);
    }

    // set up input
    TensorData<T> input =
    {
        inputInfo,
        armnnUtils::QuantizedVector<T>(inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset())
    };

    // set up weights
    TensorData<T> weights =
    {
        weightsInfo,
        armnnUtils::QuantizedVector<T>(weightsData,
                                       weightsInfo.GetQuantizationScale(),
                                       weightsInfo.GetQuantizationOffset())
    };

    // set up biases (only wrapped in the Optional when the descriptor enables them)
    using BT = armnn::ResolveType<ArmnnBType>;
    Optional<TensorData<BT>> optionalBiases;
    if (descriptor.m_BiasEnabled)
    {
        TensorData<BT> biases =
        {
            biasesInfo,
            armnnUtils::QuantizedVector<BT>(biasesData,
                                            biasesInfo.GetQuantizationScale(),
                                            biasesInfo.GetQuantizationOffset())
        };

        optionalBiases = Optional<TensorData<BT>>(biases);
    }

    // set up output: data vector starts empty and is filled by the impl
    TensorData<T> output = { outputInfo, {} };

    // execute test
    TransposeConvolution2dTestImpl(workloadFactory,
                                   memoryManager,
                                   descriptor,
                                   input,
                                   output,
                                   weights,
                                   optionalBiases);

    // construct result object: actual output plus quantized expected output
    LayerTestResult<T, 4> testResult(outputInfo);
    testResult.output         = MakeTensor<T, 4>(outputInfo, output.second);
    testResult.outputExpected = MakeTensor<T, 4>(outputInfo,
                                                 armnnUtils::QuantizedVector<T>(expectedOutputData,
                                                                                outputInfo.GetQuantizationScale(),
                                                                                outputInfo.GetQuantizationOffset()));

    return testResult;
}
203
204template<typename T>
205void SwizzleData(armnn::TensorInfo& inputInfo,
206 std::vector<T>& inputData,
207 armnn::TensorInfo& outputInfo,
208 std::vector<T>& outputData,
209 armnn::TensorInfo& weightsInfo,
210 std::vector<T>& weightsData)
211{
212 PermuteTensorNchwToNhwc<T>(inputInfo, inputData);
213 PermuteTensorNchwToNhwc<T>(outputInfo, outputData);
214 PermuteTensorNchwToNhwc<T>(weightsInfo, weightsData);
215}
216
217} // anonymous namespace
218
// Basic transpose convolution: 1x1x3x3 input, 3x3 kernel, stride 1, no
// padding, producing a 1x1x5x5 output. Expected output values are
// hand-computed; when biasEnabled is set, the bias is added to every
// expected element before comparison.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> SimpleTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    constexpr unsigned int batches  = 1u;
    constexpr unsigned int channels = 1u;

    constexpr unsigned int wInput = 3u;
    constexpr unsigned int hInput = wInput;

    constexpr unsigned int wOutput = 5u;
    constexpr unsigned int hOutput = wOutput;

    constexpr unsigned int wWeights = 3u;
    constexpr unsigned int hWeights = wWeights;

    // shapes are authored in NCHW; swizzled below if the layout is NHWC
    TensorShape inputShape   = { batches, channels, hInput, wInput };
    TensorShape outputShape  = { batches, channels, hOutput, wOutput };
    TensorShape weightsShape = { batches, channels, hWeights, wWeights };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo({ channels }, ArmnnBType);

    std::vector<float> inputData =
    {
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f
    };

    std::vector<float> weightsData =
    {
        1.f, 2.f, 3.f,
        4.f, 5.f, 6.f,
        7.f, 8.f, 9.f
    };

    std::vector<float> biasesData = { 1.f };

    std::vector<float> expectedOutputData =
    {
        1.f,  3.f,  6.f,  5.f,  3.f,
        5.f, 12.f, 21.f, 16.f,  9.f,
        12.f, 27.f, 45.f, 33.f, 18.f,
        11.f, 24.f, 39.f, 28.f, 15.f,
        7.f, 15.f, 24.f, 17.f,  9.f
    };

    if (biasEnabled)
    {
        // apply bias to expected output data
        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
                       [&](float f) -> float { return f + biasesData[0]; });
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 1;
    descriptor.m_StrideY     = 1;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout  = layout;

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}
305
// Padded transpose convolution: 1x1x4x4 input, 3x3 kernel, stride 1 and
// padding 2 on all sides, producing a 1x1x2x2 output (padding shrinks the
// transpose-convolution output). Expected values are hand-computed; the bias
// is folded into them when biasEnabled is set.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> PaddedTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    constexpr unsigned int batches  = 1u;
    constexpr unsigned int channels = 1u;

    constexpr unsigned int wInput = 4u;
    constexpr unsigned int hInput = wInput;

    constexpr unsigned int wOutput = 2u;
    constexpr unsigned int hOutput = wOutput;

    constexpr unsigned int wWeights = 3u;
    constexpr unsigned int hWeights = wWeights;

    // shapes are authored in NCHW; swizzled below if the layout is NHWC
    TensorShape inputShape   = { batches, channels, hInput, wInput };
    TensorShape outputShape  = { batches, channels, hOutput, wOutput };
    TensorShape weightsShape = { batches, channels, hWeights, wWeights };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo({ channels }, ArmnnBType);

    std::vector<float> inputData =
    {
        1.f, 3.f, 2.f, 1.f,
        1.f, 3.f, 3.f, 1.f,
        2.f, 1.f, 1.f, 3.f,
        3.f, 2.f, 3.f, 3.f
    };

    std::vector<float> weightsData =
    {
        1.f, 2.f, 3.f,
        0.f, 1.f, 0.f,
        2.f, 1.f, 2.f
    };

    std::vector<float> biasesData = { 1.f };

    std::vector<float> expectedOutputData =
    {
        21.f, 21.f,
        28.f, 27.f
    };

    if (biasEnabled)
    {
        // apply bias to expected output data
        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
                       [&](float f) -> float { return f + biasesData[0]; });
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_PadLeft     = 2;
    descriptor.m_PadRight    = 2;
    descriptor.m_PadTop      = 2;
    descriptor.m_PadBottom   = 2;
    descriptor.m_StrideX     = 1;
    descriptor.m_StrideY     = 1;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout  = layout;

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}
394
// Strided transpose convolution: 1x1x3x3 input, 3x3 kernel, stride 2 and no
// padding, producing a 1x1x7x7 output. Expected values are hand-computed; the
// bias is folded into them when biasEnabled is set.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> StridedTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    constexpr unsigned int batches  = 1u;
    constexpr unsigned int channels = 1u;

    constexpr unsigned int wInput = 3u;
    constexpr unsigned int hInput = wInput;

    constexpr unsigned int wOutput = 7u;
    constexpr unsigned int hOutput = wOutput;

    constexpr unsigned int wWeights = 3u;
    constexpr unsigned int hWeights = wWeights;

    // shapes are authored in NCHW; swizzled below if the layout is NHWC
    TensorShape inputShape   = { batches, channels, hInput, wInput };
    TensorShape outputShape  = { batches, channels, hOutput, wOutput };
    TensorShape weightsShape = { batches, channels, hWeights, wWeights };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo({ channels }, ArmnnBType);

    std::vector<float> inputData =
    {
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f
    };

    std::vector<float> weightsData =
    {
        1.f, 2.f, 3.f,
        4.f, 5.f, 6.f,
        7.f, 8.f, 9.f
    };

    std::vector<float> biasesData = { 1.f };

    std::vector<float> expectedOutputData =
    {
        1.f,  2.f,  4.f,  2.f,  4.f,  2.f,  3.f,
        4.f,  5.f, 10.f,  5.f, 10.f,  5.f,  6.f,
        8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
        4.f,  5.f, 10.f,  5.f, 10.f,  5.f,  6.f,
        8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
        4.f,  5.f, 10.f,  5.f, 10.f,  5.f,  6.f,
        7.f,  8.f, 16.f,  8.f, 16.f,  8.f,  9.f
    };

    if (biasEnabled)
    {
        // apply bias to expected output data
        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
                       [&](float f) -> float { return f + biasesData[0]; });
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 2;
    descriptor.m_StrideY     = 2;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout  = layout;

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}
483
// Multi-channel transpose convolution: 1x1x2x2 input, two 3x3 output-channel
// kernels, stride 2, bias always enabled (one bias per output channel),
// producing a 1x2x5x5 output. Expected values are hand-computed with the
// per-channel biases already folded in.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> MultiChannelTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    TensorShape inputShape  = { 1, 1, 2, 2 };
    TensorShape outputShape = { 1, 2, 5, 5 };

    // OIHW for NCHW; OHWI for NHWC
    TensorShape weightsShape = { 2, 1, 3, 3 };
    TensorShape biasesShape  = { 2 };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo(biasesShape, ArmnnBType);

    std::vector<float> inputData =
    {
        1.f, 2.f,
        3.f, 4.f,
    };

    // one 3x3 kernel per output channel
    std::vector<float> weightsData =
    {
        1.f,  3.f,  5.f,
        7.f,  9.f, 11.f,
        13.f, 15.f, 17.f,

        2.f,  4.f,  6.f,
        8.f, 10.f, 12.f,
        14.f, 16.f, 18.f
    };

    // one bias per output channel
    std::vector<float> biasesData = { -1.5f, -2.0f };

    std::vector<float> expectedOutputData =
    {
        -0.5f,  1.5f,   5.5f,  4.5f,  8.5f,
        5.5f,  7.5f,  23.5f, 16.5f, 20.5f,
        14.5f, 22.5f,  60.5f, 40.5f, 52.5f,
        19.5f, 25.5f,  59.5f, 34.5f, 42.5f,
        37.5f, 43.5f, 101.5f, 58.5f, 66.5f,

        0.0f,  2.0f,   8.0f,  6.0f, 10.0f,
        6.0f,  8.0f,  26.0f, 18.0f, 22.0f,
        18.0f, 26.0f,  70.0f, 46.0f, 58.0f,
        22.0f, 28.0f,  66.0f, 38.0f, 46.0f,
        40.0f, 46.0f, 108.0f, 62.0f, 70.0f
    };

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 2;
    descriptor.m_StrideY     = 2;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout  = layout;

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}
562
// Per-axis (per-output-channel) quantization test: QAsymmU8 input/output, a
// QSymmS8 kernel with one quantization scale per output channel (axis 0), and
// Signed32 biases whose per-channel scales equal input scale * kernel scale
// (0.50 * {0.25, 0.5} = {0.125, 0.25}). The workload is built manually here
// (rather than via TransposeConvolution2dTest) because the shared driver only
// supports a single per-tensor quantization scale.
LayerTestResult<uint8_t, 4> TransposeConvolution2dPerAxisQuantTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    const DataType inputType  = DataType::QAsymmU8;
    const DataType kernelType = DataType::QSymmS8;
    const DataType biasType   = DataType::Signed32;

    TensorInfo inputInfo ({ 1, 1, 2, 2 }, inputType, 0.50f, 10);
    TensorInfo outputInfo({ 1, 2, 5, 5 }, inputType, 0.50f, 10);

    // one scale per output channel, quantized along dimension 0
    const std::vector<float> quantScales{ 0.25f, 0.5f };
    constexpr unsigned int quantDimension = 0;

    TensorInfo kernelInfo({ 2, 1, 3, 3 }, kernelType, quantScales, quantDimension);

    const std::vector<float> biasQuantScales{ 0.125f, 0.25f };
    TensorInfo biasInfo({ 2 }, biasType, biasQuantScales, quantDimension);

    // all data below is pre-quantized
    std::vector<uint8_t> inputData =
    {
        12, 14,
        16, 18
    };

    std::vector<int8_t> kernelData =
    {
        4, 12, 20,
        28, 36, 44,
        52, 60, 68,

        4,  8, 12,
        16, 20, 24,
        28, 32, 36
    };

    std::vector<int32_t> biasData = { -12, -8 };

    std::vector<uint8_t> expectedOutputData =
    {
        9, 13, 21, 19, 27,
        21, 25, 57, 43, 51,
        39, 55, 131, 91, 115,
        49, 61, 129, 79, 95,
        85, 97, 213, 127, 143,

        10, 14, 26, 22, 30,
        22, 26, 62, 46, 54,
        46, 62, 150, 102, 126,
        54, 66, 142, 86, 102,
        90, 102, 226, 134, 150
    };

    // data is authored in NCHW; permute infos and data for NHWC backends
    if (layout == DataLayout::NHWC)
    {
        PermuteTensorNchwToNhwc(inputInfo, inputData);
        PermuteTensorNchwToNhwc(kernelInfo, kernelData);
        PermuteTensorNchwToNhwc(outputInfo, expectedOutputData);
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 2;
    descriptor.m_StrideY     = 2;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout  = layout;

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputInfo);
    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    WorkloadInfo workloadInfo;
    ScopedCpuTensorHandle weightTensor(kernelInfo);
    ScopedCpuTensorHandle biasTensor(biasInfo);

    AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
    AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());

    TransposeConvolution2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Weight     = &weightTensor;
    queueDescriptor.m_Bias       = &biasTensor;

    AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());

    std::unique_ptr<IWorkload> workload = workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    ExecuteWorkload(*workload, memoryManager);

    // collect actual output and attach the expected reference tensor
    LayerTestResult<uint8_t, 4> ret(outputInfo);
    CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
    ret.outputExpected = MakeTensor<uint8_t, 4>(outputInfo, expectedOutputData);

    return ret;
}
666
667//
668// Explicit template specializations
669//
670
671template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
672SimpleTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
673 armnn::IWorkloadFactory& workloadFactory,
674 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
675 bool biasEnabled,
676 const armnn::DataLayout layout);
677
Sadik Armagan303980c2020-04-17 12:45:14 +0100678template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
679SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
680 armnn::IWorkloadFactory& workloadFactory,
681 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
682 bool biasEnabled,
683 const armnn::DataLayout layout);
684
Derek Lambertif90c56d2020-01-10 17:14:08 +0000685template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
686SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +0000687 armnn::IWorkloadFactory& workloadFactory,
688 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
689 bool biasEnabled,
690 const armnn::DataLayout layout);
691
Derek Lambertif90c56d2020-01-10 17:14:08 +0000692template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
693SimpleTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +0000694 armnn::IWorkloadFactory& workloadFactory,
695 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
696 bool biasEnabled,
697 const armnn::DataLayout layout);
698
699template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
700PaddedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
701 armnn::IWorkloadFactory& workloadFactory,
702 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
703 bool biasEnabled,
704 const armnn::DataLayout layout);
705
Sadik Armagan303980c2020-04-17 12:45:14 +0100706template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
707PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
708 armnn::IWorkloadFactory& workloadFactory,
709 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
710 bool biasEnabled,
711 const armnn::DataLayout layout);
712
Derek Lambertif90c56d2020-01-10 17:14:08 +0000713template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
714PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +0000715 armnn::IWorkloadFactory& workloadFactory,
716 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
717 bool biasEnabled,
718 const armnn::DataLayout layout);
719
Derek Lambertif90c56d2020-01-10 17:14:08 +0000720template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
721PaddedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +0000722 armnn::IWorkloadFactory& workloadFactory,
723 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
724 bool biasEnabled,
725 const armnn::DataLayout layout);
726
727template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
728StridedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
729 armnn::IWorkloadFactory& workloadFactory,
730 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
731 bool biasEnabled,
732 const armnn::DataLayout layout);
733
Sadik Armagan303980c2020-04-17 12:45:14 +0100734template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
735StridedTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
736 armnn::IWorkloadFactory& workloadFactory,
737 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
738 bool biasEnabled,
739 const armnn::DataLayout layout);
740
Derek Lambertif90c56d2020-01-10 17:14:08 +0000741template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
742StridedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +0000743 armnn::IWorkloadFactory& workloadFactory,
744 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
745 bool biasEnabled,
746 const armnn::DataLayout layout);
747
Derek Lambertif90c56d2020-01-10 17:14:08 +0000748template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
749StridedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +0000750 armnn::IWorkloadFactory& workloadFactory,
751 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
752 bool biasEnabled,
753 const armnn::DataLayout layout);
754
755template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
756MultiChannelTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
757 armnn::IWorkloadFactory& workloadFactory,
758 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
759 const armnn::DataLayout layout);
760
Sadik Armagan303980c2020-04-17 12:45:14 +0100761template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
762MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
763 armnn::IWorkloadFactory& workloadFactory,
764 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
765 const armnn::DataLayout layout);
766
Derek Lambertif90c56d2020-01-10 17:14:08 +0000767template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
768MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +0000769 armnn::IWorkloadFactory& workloadFactory,
770 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
771 const armnn::DataLayout layout);
772
Derek Lambertif90c56d2020-01-10 17:14:08 +0000773template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
774MultiChannelTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +0000775 armnn::IWorkloadFactory& workloadFactory,
776 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
777 const armnn::DataLayout layout);