blob: 85ce7e5e6fe06aa6add965d386b39071329cefbe [file] [log] [blame]
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +00001//
Teresa Charlinfbf0e5b2020-08-17 01:01:06 +01002// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +00003// SPDX-License-Identifier: MIT
4//
5
6#include "TransposeConvolution2dTestImpl.hpp"
7
Matteo Martincighe011d202019-11-28 11:35:47 +00008#include <QuantizeHelper.hpp>
9
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +000010
Matteo Martincighe011d202019-11-28 11:35:47 +000011#include <armnnUtils/Permute.hpp>
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +000012
James Conroy1f58f032021-04-27 17:13:27 +010013#include <backendsCommon/TensorHandle.hpp>
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +000014
15#include <backendsCommon/test/DataLayoutUtils.hpp>
16#include <backendsCommon/test/TensorCopyUtils.hpp>
17#include <backendsCommon/test/WorkloadTestUtils.hpp>
18
19#include <reference/RefWorkloadFactory.hpp>
20
21#include <test/TensorHelpers.hpp>
22
23#include <boost/test/unit_test.hpp>
24
25#include <string>
26#include <utility>
27#include <vector>
28
29namespace
30{
31
32template<typename T>
33using TensorData = std::pair<armnn::TensorInfo, std::vector<T>>;
34
35template<typename T>
36void VerifyInputTensorData(const TensorData<T>& data, const std::string& tensorName)
37{
38 if (data.first.GetNumElements() > data.second.size())
39 {
40 throw armnn::InvalidArgumentException("Size of data too small for " + tensorName + ": expected " +
41 std::to_string(data.first.GetNumElements()) + "but got " + std::to_string(data.second.size()));
42 }
43}
44
45template<typename T, typename BT>
46void TransposeConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
47 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williamsec36d3e2020-08-28 13:17:05 +010048 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +000049 const armnn::TransposeConvolution2dDescriptor& descriptor,
50 const TensorData<T>& input,
51 TensorData<T>& output,
52 const TensorData<T>& weights,
53 const armnn::Optional<TensorData<BT>>& biases)
54{
Jan Eilers8eb25602020-03-09 12:13:48 +000055 IgnoreUnused(memoryManager);
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +000056 using namespace armnn;
57
58 VerifyInputTensorData(input, "input");
59 VerifyInputTensorData(weights, "biases");
60
61 if (descriptor.m_BiasEnabled)
62 {
63 if (!biases.has_value())
64 {
65 throw InvalidArgumentException("Bias enabled but no bias data provided");
66 }
67 VerifyInputTensorData(biases.value(), "biases");
68 }
69
70 // set up weights
James Conroy1f58f032021-04-27 17:13:27 +010071 ScopedTensorHandle weightsTensor(weights.first);
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +000072
73 TransposeConvolution2dQueueDescriptor queueDescriptor;
74 queueDescriptor.m_Parameters = descriptor;
75 queueDescriptor.m_Weight = &weightsTensor;
76
77 AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.second.data());
78
James Conroy1f58f032021-04-27 17:13:27 +010079 std::unique_ptr<ScopedTensorHandle> biasesTensor;
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +000080 if (descriptor.m_BiasEnabled)
81 {
82 // set up biases
James Conroy1f58f032021-04-27 17:13:27 +010083 biasesTensor = std::make_unique<ScopedTensorHandle>(biases.value().first);
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +000084 queueDescriptor.m_Bias = biasesTensor.get();
85
86 AllocateAndCopyDataToITensorHandle(biasesTensor.get(), biases.value().second.data());
87 }
88
89 // set up input and output handles
Finn Williamsec36d3e2020-08-28 13:17:05 +010090 std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(input.first);
91 std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(output.first);
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +000092
93 // set up workload
94 armnn::WorkloadInfo workloadInfo;
95 AddInputToWorkload(queueDescriptor, workloadInfo, input.first, inputHandle.get());
96 AddOutputToWorkload(queueDescriptor, workloadInfo, output.first, outputHandle.get());
97
98 std::unique_ptr<armnn::IWorkload> workload =
99 workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);
100
101 inputHandle->Allocate();
102 outputHandle->Allocate();
103
104 CopyDataToITensorHandle(inputHandle.get(), input.second.data());
105
106 ExecuteWorkload(*workload, memoryManager);
107
108 // copy output
Rob Hughesbb46dde2020-05-20 15:27:37 +0100109 output.second = std::vector<T>(output.first.GetNumElements(), T());
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +0000110 CopyDataFromITensorHandle(output.second.data(), outputHandle.get());
111}
112
/// Quantizes the float reference data according to ArmnnType/ArmnnBType,
/// runs the transpose convolution via TransposeConvolution2dTestImpl, and
/// packages actual vs. expected outputs into a LayerTestResult.
///
/// @note For quantized types this MUTATES the passed-in TensorInfos, setting
///       fixed quantization parameters (scale 0.5, offset 10; bias scale is
///       the product of input and weight scales, as required for Signed32 biases).
/// @param inputData/expectedOutputData/weightsData/biasesData float reference
///        values; quantized on the fly when T is a quantized type
/// @return LayerTestResult holding both the computed and the expected output
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> TransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::TransposeConvolution2dDescriptor& descriptor,
    armnn::TensorInfo& inputInfo,
    const std::vector<float>& inputData,
    armnn::TensorInfo& outputInfo,
    const std::vector<float>& expectedOutputData,
    armnn::TensorInfo& weightsInfo,
    const std::vector<float>& weightsData,
    armnn::TensorInfo& biasesInfo,
    const std::vector<float>& biasesData)
{
    using namespace armnn;

    // set up quantization parameters (must happen before any QuantizedVector
    // call below, since those read the scales/offsets set here)
    if (armnn::IsQuantizedType<T>())
    {
        constexpr float qScale = 0.50f;
        constexpr int32_t qOffset = 10;

        inputInfo.SetQuantizationScale(qScale);
        inputInfo.SetQuantizationOffset(qOffset);

        outputInfo.SetQuantizationScale(qScale);
        outputInfo.SetQuantizationOffset(qOffset);

        weightsInfo.SetQuantizationScale(qScale);
        weightsInfo.SetQuantizationOffset(qOffset);

        // bias scale = input scale * weight scale, offset 0
        biasesInfo.SetQuantizationScale(qScale * qScale);
        biasesInfo.SetQuantizationOffset(0);
    }

    // set up input
    TensorData<T> input =
    {
        inputInfo,
        armnnUtils::QuantizedVector<T>(inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset())
    };

    // set up weights
    TensorData<T> weights =
    {
        weightsInfo,
        armnnUtils::QuantizedVector<T>(weightsData,
                                       weightsInfo.GetQuantizationScale(),
                                       weightsInfo.GetQuantizationOffset())
    };

    // set up biases (only when the descriptor asks for them)
    using BT = armnn::ResolveType<ArmnnBType>;
    Optional<TensorData<BT>> optionalBiases;
    if (descriptor.m_BiasEnabled)
    {
        TensorData<BT> biases =
        {
            biasesInfo,
            armnnUtils::QuantizedVector<BT>(biasesData,
                                            biasesInfo.GetQuantizationScale(),
                                            biasesInfo.GetQuantizationOffset())
        };

        optionalBiases = Optional<TensorData<BT>>(biases);
    }

    // set up output; data vector is filled in by the impl
    TensorData<T> output = { outputInfo, {} };

    // execute test
    TransposeConvolution2dTestImpl(workloadFactory,
                                   memoryManager,
                                   tensorHandleFactory,
                                   descriptor,
                                   input,
                                   output,
                                   weights,
                                   optionalBiases);

    // construct result object (expected output is quantized with the output params)
    LayerTestResult<T, 4> testResult(outputInfo);
    testResult.output = MakeTensor<T, 4>(outputInfo, output.second);
    testResult.outputExpected = MakeTensor<T, 4>(outputInfo,
                                                 armnnUtils::QuantizedVector<T>(expectedOutputData,
                                                                                outputInfo.GetQuantizationScale(),
                                                                                outputInfo.GetQuantizationOffset()));

    return testResult;
}
204
205template<typename T>
206void SwizzleData(armnn::TensorInfo& inputInfo,
207 std::vector<T>& inputData,
208 armnn::TensorInfo& outputInfo,
209 std::vector<T>& outputData,
210 armnn::TensorInfo& weightsInfo,
211 std::vector<T>& weightsData)
212{
213 PermuteTensorNchwToNhwc<T>(inputInfo, inputData);
214 PermuteTensorNchwToNhwc<T>(outputInfo, outputData);
215 PermuteTensorNchwToNhwc<T>(weightsInfo, weightsData);
216}
217
218} // anonymous namespace
219
220template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
221LayerTestResult<T, 4> SimpleTransposeConvolution2dTest(
222 armnn::IWorkloadFactory& workloadFactory,
223 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williamsec36d3e2020-08-28 13:17:05 +0100224 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +0000225 bool biasEnabled,
226 const armnn::DataLayout layout)
227{
228 using namespace armnn;
229
230 constexpr unsigned int batches = 1u;
231 constexpr unsigned int channels = 1u;
232
233 constexpr unsigned int wInput = 3u;
234 constexpr unsigned int hInput = wInput;
235
236 constexpr unsigned int wOutput = 5u;
237 constexpr unsigned int hOutput = wOutput;
238
239 constexpr unsigned int wWeights = 3u;
240 constexpr unsigned int hWeights = wWeights;
241
242 TensorShape inputShape = { batches, channels, hInput, wInput };
243 TensorShape outputShape = { batches, channels, hOutput, wOutput };
244 TensorShape weightsShape = { batches, channels, hWeights, wWeights };
245
246 TensorInfo inputInfo(inputShape, ArmnnType);
247 TensorInfo outputInfo(outputShape, ArmnnType);
248 TensorInfo weightsInfo(weightsShape, ArmnnType);
249 TensorInfo biasesInfo({ channels }, ArmnnBType);
250
251 std::vector<float> inputData =
252 {
253 1.f, 1.f, 1.f,
254 1.f, 1.f, 1.f,
255 1.f, 1.f, 1.f
256 };
257
258 std::vector<float> weightsData =
259 {
260 1.f, 2.f, 3.f,
261 4.f, 5.f, 6.f,
262 7.f, 8.f, 9.f
263 };
264
265 std::vector<float> biasesData = { 1.f };
266
267 std::vector<float> expectedOutputData =
268 {
269 1.f, 3.f, 6.f, 5.f, 3.f,
270 5.f, 12.f, 21.f, 16.f, 9.f,
271 12.f, 27.f, 45.f, 33.f, 18.f,
272 11.f, 24.f, 39.f, 28.f, 15.f,
273 7.f, 15.f, 24.f, 17.f, 9.f
274 };
275
276 if (biasEnabled)
277 {
278 // apply bias to expected output data
279 std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
280 [&](float f) -> float { return f + biasesData[0]; });
281 }
282
283 TransposeConvolution2dDescriptor descriptor;
284 descriptor.m_StrideX = 1;
285 descriptor.m_StrideY = 1;
286 descriptor.m_BiasEnabled = biasEnabled;
287 descriptor.m_DataLayout = layout;
288
289 // swizzle data if needed
290 if (layout == armnn::DataLayout::NHWC)
291 {
292 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
293 }
294
295 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
296 memoryManager,
Finn Williamsec36d3e2020-08-28 13:17:05 +0100297 tensorHandleFactory,
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +0000298 descriptor,
299 inputInfo,
300 inputData,
301 outputInfo,
302 expectedOutputData,
303 weightsInfo,
304 weightsData,
305 biasesInfo,
306 biasesData);
307}
308
309template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
310LayerTestResult<T, 4> PaddedTransposeConvolution2dTest(
311 armnn::IWorkloadFactory& workloadFactory,
312 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williamsec36d3e2020-08-28 13:17:05 +0100313 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +0000314 bool biasEnabled,
315 const armnn::DataLayout layout)
316{
317 using namespace armnn;
318
319 constexpr unsigned int batches = 1u;
320 constexpr unsigned int channels = 1u;
321
322 constexpr unsigned int wInput = 4u;
323 constexpr unsigned int hInput = wInput;
324
325 constexpr unsigned int wOutput = 2u;
326 constexpr unsigned int hOutput = wOutput;
327
328 constexpr unsigned int wWeights = 3u;
329 constexpr unsigned int hWeights = wWeights;
330
331 TensorShape inputShape = { batches, channels, hInput, wInput };
332 TensorShape outputShape = { batches, channels, hOutput, wOutput };
333 TensorShape weightsShape = { batches, channels, hWeights, wWeights };
334
335 TensorInfo inputInfo(inputShape, ArmnnType);
336 TensorInfo outputInfo(outputShape, ArmnnType);
337 TensorInfo weightsInfo(weightsShape, ArmnnType);
338 TensorInfo biasesInfo({ channels }, ArmnnBType);
339
340 std::vector<float> inputData =
341 {
342 1.f, 3.f, 2.f, 1.f,
343 1.f, 3.f, 3.f, 1.f,
344 2.f, 1.f, 1.f, 3.f,
345 3.f, 2.f, 3.f, 3.f
346 };
347
348 std::vector<float> weightsData =
349 {
350 1.f, 2.f, 3.f,
351 0.f, 1.f, 0.f,
352 2.f, 1.f, 2.f
353 };
354
355 std::vector<float> biasesData = { 1.f };
356
357 std::vector<float> expectedOutputData =
358 {
359 21.f, 21.f,
360 28.f, 27.f
361 };
362
363 if (biasEnabled)
364 {
365 // apply bias to expected output data
366 std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
367 [&](float f) -> float { return f + biasesData[0]; });
368 }
369
370 TransposeConvolution2dDescriptor descriptor;
371 descriptor.m_PadLeft = 2;
372 descriptor.m_PadRight = 2;
373 descriptor.m_PadTop = 2;
374 descriptor.m_PadBottom = 2;
375 descriptor.m_StrideX = 1;
376 descriptor.m_StrideY = 1;
377 descriptor.m_BiasEnabled = biasEnabled;
378 descriptor.m_DataLayout = layout;
379
380 // swizzle data if needed
381 if (layout == armnn::DataLayout::NHWC)
382 {
383 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
384 }
385
386 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
387 memoryManager,
Finn Williamsec36d3e2020-08-28 13:17:05 +0100388 tensorHandleFactory,
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +0000389 descriptor,
390 inputInfo,
391 inputData,
392 outputInfo,
393 expectedOutputData,
394 weightsInfo,
395 weightsData,
396 biasesInfo,
397 biasesData);
398}
399
400template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
401LayerTestResult<T, 4> StridedTransposeConvolution2dTest(
402 armnn::IWorkloadFactory& workloadFactory,
403 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williamsec36d3e2020-08-28 13:17:05 +0100404 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +0000405 bool biasEnabled,
406 const armnn::DataLayout layout)
407{
408 using namespace armnn;
409
410 constexpr unsigned int batches = 1u;
411 constexpr unsigned int channels = 1u;
412
413 constexpr unsigned int wInput = 3u;
414 constexpr unsigned int hInput = wInput;
415
416 constexpr unsigned int wOutput = 7u;
417 constexpr unsigned int hOutput = wOutput;
418
419 constexpr unsigned int wWeights = 3u;
420 constexpr unsigned int hWeights = wWeights;
421
422 TensorShape inputShape = { batches, channels, hInput, wInput };
423 TensorShape outputShape = { batches, channels, hOutput, wOutput };
424 TensorShape weightsShape = { batches, channels, hWeights, wWeights };
425
426 TensorInfo inputInfo(inputShape, ArmnnType);
427 TensorInfo outputInfo(outputShape, ArmnnType);
428 TensorInfo weightsInfo(weightsShape, ArmnnType);
429 TensorInfo biasesInfo({ channels }, ArmnnBType);
430
431 std::vector<float> inputData =
432 {
433 1.f, 1.f, 1.f,
434 1.f, 1.f, 1.f,
435 1.f, 1.f, 1.f
436 };
437
438 std::vector<float> weightsData =
439 {
440 1.f, 2.f, 3.f,
441 4.f, 5.f, 6.f,
442 7.f, 8.f, 9.f
443 };
444
445 std::vector<float> biasesData = { 1.f };
446
447 std::vector<float> expectedOutputData =
448 {
449 1.f, 2.f, 4.f, 2.f, 4.f, 2.f, 3.f,
450 4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
451 8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
452 4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
453 8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
454 4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
455 7.f, 8.f, 16.f, 8.f, 16.f, 8.f, 9.f
456 };
457
458 if (biasEnabled)
459 {
460 // apply bias to expected output data
461 std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
462 [&](float f) -> float { return f + biasesData[0]; });
463 }
464
465 TransposeConvolution2dDescriptor descriptor;
466 descriptor.m_StrideX = 2;
467 descriptor.m_StrideY = 2;
468 descriptor.m_BiasEnabled = biasEnabled;
469 descriptor.m_DataLayout = layout;
470
471 // swizzle data if needed
472 if (layout == armnn::DataLayout::NHWC)
473 {
474 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
475 }
476
477 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
478 memoryManager,
Finn Williamsec36d3e2020-08-28 13:17:05 +0100479 tensorHandleFactory,
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +0000480 descriptor,
481 inputInfo,
482 inputData,
483 outputInfo,
484 expectedOutputData,
485 weightsInfo,
486 weightsData,
487 biasesInfo,
488 biasesData);
489}
490
491template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
492LayerTestResult<T, 4> MultiChannelTransposeConvolution2dTest(
493 armnn::IWorkloadFactory& workloadFactory,
494 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williamsec36d3e2020-08-28 13:17:05 +0100495 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +0000496 const armnn::DataLayout layout)
497{
498 using namespace armnn;
499
500 TensorShape inputShape = { 1, 1, 2, 2 };
501 TensorShape outputShape = { 1, 2, 5, 5 };
502
503 // OIHW for NCHW; OHWI for NHWC
504 TensorShape weightsShape = { 2, 1, 3, 3 };
505 TensorShape biasesShape = { 2 };
506
507 TensorInfo inputInfo(inputShape, ArmnnType);
508 TensorInfo outputInfo(outputShape, ArmnnType);
509 TensorInfo weightsInfo(weightsShape, ArmnnType);
510 TensorInfo biasesInfo(biasesShape, ArmnnBType);
511
512 std::vector<float> inputData =
513 {
514 1.f, 2.f,
515 3.f, 4.f,
516 };
517
518 std::vector<float> weightsData =
519 {
520 1.f, 3.f, 5.f,
521 7.f, 9.f, 11.f,
522 13.f, 15.f, 17.f,
523
524 2.f, 4.f, 6.f,
525 8.f, 10.f, 12.f,
526 14.f, 16.f, 18.f
527 };
528
529 std::vector<float> biasesData = { -1.5f, -2.0f };
530
531 std::vector<float> expectedOutputData =
532 {
533 -0.5f, 1.5f, 5.5f, 4.5f, 8.5f,
534 5.5f, 7.5f, 23.5f, 16.5f, 20.5f,
535 14.5f, 22.5f, 60.5f, 40.5f, 52.5f,
536 19.5f, 25.5f, 59.5f, 34.5f, 42.5f,
537 37.5f, 43.5f, 101.5f, 58.5f, 66.5f,
538
539 0.0f, 2.0f, 8.0f, 6.0f, 10.0f,
540 6.0f, 8.0f, 26.0f, 18.0f, 22.0f,
541 18.0f, 26.0f, 70.0f, 46.0f, 58.0f,
542 22.0f, 28.0f, 66.0f, 38.0f, 46.0f,
543 40.0f, 46.0f, 108.0f, 62.0f, 70.0f
544 };
545
546 TransposeConvolution2dDescriptor descriptor;
547 descriptor.m_StrideX = 2;
548 descriptor.m_StrideY = 2;
549 descriptor.m_BiasEnabled = true;
550 descriptor.m_DataLayout = layout;
551
552 // swizzle data if needed
553 if (layout == armnn::DataLayout::NHWC)
554 {
555 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
556 }
557
558 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
559 memoryManager,
Finn Williamsec36d3e2020-08-28 13:17:05 +0100560 tensorHandleFactory,
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +0000561 descriptor,
562 inputInfo,
563 inputData,
564 outputInfo,
565 expectedOutputData,
566 weightsInfo,
567 weightsData,
568 biasesInfo,
569 biasesData);
570}
571
/// Tests a transpose convolution with PER-AXIS (per-output-channel) quantized
/// weights and biases: QAsymmU8 input/output, QSymmS8 kernel quantized along
/// dimension 0, Signed32 biases. The workload is constructed manually here
/// (rather than via TransposeConvolution2dTestImpl) because the tensors need
/// individual, per-axis quantization parameters.
LayerTestResult<uint8_t, 4> TransposeConvolution2dPerAxisQuantTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    const DataType inputType = DataType::QAsymmU8;
    const DataType kernelType = DataType::QSymmS8;
    const DataType biasType = DataType::Signed32;

    // input/output share scale 0.5 and offset 10
    TensorInfo inputInfo ({ 1, 1, 2, 2 }, inputType, 0.50f, 10);
    TensorInfo outputInfo({ 1, 2, 5, 5 }, inputType, 0.50f, 10);

    // one kernel scale per output channel, quantized along dimension 0 (O of OIHW)
    const std::vector<float> quantScales{ 0.25f, 0.5f };
    constexpr unsigned int quantDimension = 0;

    TensorInfo kernelInfo({ 2, 1, 3, 3 }, kernelType, quantScales, quantDimension);

    // bias scales = input scale * per-channel kernel scale
    const std::vector<float> biasQuantScales{ 0.125f, 0.25f };
    TensorInfo biasInfo({ 2 }, biasType, biasQuantScales, quantDimension);

    std::vector<uint8_t> inputData =
    {
        12, 14,
        16, 18
    };

    std::vector<int8_t> kernelData =
    {
        4, 12, 20,
        28, 36, 44,
        52, 60, 68,

        4, 8, 12,
        16, 20, 24,
        28, 32, 36
    };

    std::vector<int32_t> biasData = { -12, -8 };

    std::vector<uint8_t> expectedOutputData =
    {
        9, 13, 21, 19, 27,
        21, 25, 57, 43, 51,
        39, 55, 131, 91, 115,
        49, 61, 129, 79, 95,
        85, 97, 213, 127, 143,

        10, 14, 26, 22, 30,
        22, 26, 62, 46, 54,
        46, 62, 150, 102, 126,
        54, 66, 142, 86, 102,
        90, 102, 226, 134, 150
    };

    // reference data above is authored in NCHW; convert in place if needed
    if (layout == DataLayout::NHWC)
    {
        PermuteTensorNchwToNhwc(inputInfo, inputData);
        PermuteTensorNchwToNhwc(kernelInfo, kernelData);
        PermuteTensorNchwToNhwc(outputInfo, expectedOutputData);
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout = layout;

    std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
    std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);

    WorkloadInfo workloadInfo;
    ScopedTensorHandle weightTensor(kernelInfo);
    ScopedTensorHandle biasTensor(biasInfo);

    AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
    AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());

    TransposeConvolution2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Weight = &weightTensor;
    queueDescriptor.m_Bias = &biasTensor;

    AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());

    // handles must be allocated before data is copied in
    std::unique_ptr<IWorkload> workload = workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    ExecuteWorkload(*workload, memoryManager);

    // collect actual vs. expected output into the result object
    LayerTestResult<uint8_t, 4> ret(outputInfo);
    CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
    ret.outputExpected = MakeTensor<uint8_t, 4>(outputInfo, expectedOutputData);

    return ret;
}
674
//
// Explicit template specializations
// (instantiated here so the test bodies can live in this .cpp; each test is
// instantiated for Float32 and the supported quantized data types)
//

// SimpleTransposeConvolution2dTest

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

// PaddedTransposeConvolution2dTest

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

// StridedTransposeConvolution2dTest

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

// MultiChannelTransposeConvolution2dTest

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout);