//
// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "TransposeConvolution2dTestImpl.hpp"

#include <QuantizeHelper.hpp>

#include <armnnUtils/Permute.hpp>

#include <backendsCommon/TensorHandle.hpp>

#include <backendsCommon/test/DataLayoutUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <reference/RefWorkloadFactory.hpp>

#include <test/TensorHelpers.hpp>

#include <boost/test/unit_test.hpp>

#include <string>
#include <utility>
#include <vector>

namespace
{

template<typename T>
using TensorData = std::pair<armnn::TensorInfo, std::vector<T>>;

template<typename T>
void VerifyInputTensorData(const TensorData<T>& data, const std::string& tensorName)
{
    if (data.first.GetNumElements() > data.second.size())
    {
        throw armnn::InvalidArgumentException("Size of data too small for " + tensorName + ": expected " +
            std::to_string(data.first.GetNumElements()) + " but got " + std::to_string(data.second.size()));
    }
}

template<typename T, typename BT>
void TransposeConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                    const armnn::ITensorHandleFactory& tensorHandleFactory,
                                    const armnn::TransposeConvolution2dDescriptor& descriptor,
                                    const TensorData<T>& input,
                                    TensorData<T>& output,
                                    const TensorData<T>& weights,
                                    const armnn::Optional<TensorData<BT>>& biases)
{
    IgnoreUnused(memoryManager);
    using namespace armnn;

    VerifyInputTensorData(input, "input");
    VerifyInputTensorData(weights, "weights");

    if (descriptor.m_BiasEnabled)
    {
        if (!biases.has_value())
        {
            throw InvalidArgumentException("Bias enabled but no bias data provided");
        }
        VerifyInputTensorData(biases.value(), "biases");
    }

    // set up weights
    ScopedTensorHandle weightsTensor(weights.first);

    TransposeConvolution2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Weight = &weightsTensor;

    AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.second.data());

    std::unique_ptr<ScopedTensorHandle> biasesTensor;
    if (descriptor.m_BiasEnabled)
    {
        // set up biases
        biasesTensor = std::make_unique<ScopedTensorHandle>(biases.value().first);
        queueDescriptor.m_Bias = biasesTensor.get();

        AllocateAndCopyDataToITensorHandle(biasesTensor.get(), biases.value().second.data());
    }

    // set up input and output handles
    std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(input.first);
    std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(output.first);

    // set up workload
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, input.first, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, output.first, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload =
        workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.second.data());

    ExecuteWorkload(*workload, memoryManager);

    // copy output
    output.second = std::vector<T>(output.first.GetNumElements(), T());
    CopyDataFromITensorHandle(output.second.data(), outputHandle.get());
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> TransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::TransposeConvolution2dDescriptor& descriptor,
    armnn::TensorInfo& inputInfo,
    const std::vector<float>& inputData,
    armnn::TensorInfo& outputInfo,
    const std::vector<float>& expectedOutputData,
    armnn::TensorInfo& weightsInfo,
    const std::vector<float>& weightsData,
    armnn::TensorInfo& biasesInfo,
    const std::vector<float>& biasesData)
{
    using namespace armnn;

    // set up quantization parameters
    if (armnn::IsQuantizedType<T>())
    {
        constexpr float qScale = 0.50f;
        constexpr int32_t qOffset = 10;

        inputInfo.SetQuantizationScale(qScale);
        inputInfo.SetQuantizationOffset(qOffset);

        outputInfo.SetQuantizationScale(qScale);
        outputInfo.SetQuantizationOffset(qOffset);

        weightsInfo.SetQuantizationScale(qScale);
        weightsInfo.SetQuantizationOffset(qOffset);

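        // Quantized biases use scale = inputScale * weightScale and a zero offset,
        // matching how the bias is added to the int32 accumulator in the quantized workload.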
        biasesInfo.SetQuantizationScale(qScale * qScale);
        biasesInfo.SetQuantizationOffset(0);
    }

    // set up input
    TensorData<T> input =
    {
        inputInfo,
        armnnUtils::QuantizedVector<T>(inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset())
    };

    // set up weights
    TensorData<T> weights =
    {
        weightsInfo,
        armnnUtils::QuantizedVector<T>(weightsData,
                                       weightsInfo.GetQuantizationScale(),
                                       weightsInfo.GetQuantizationOffset())
    };

    // set up biases
    using BT = armnn::ResolveType<ArmnnBType>;
    Optional<TensorData<BT>> optionalBiases;
    if (descriptor.m_BiasEnabled)
    {
        TensorData<BT> biases =
        {
            biasesInfo,
            armnnUtils::QuantizedVector<BT>(biasesData,
                                            biasesInfo.GetQuantizationScale(),
                                            biasesInfo.GetQuantizationOffset())
        };

        optionalBiases = Optional<TensorData<BT>>(biases);
    }

    // set up output
    TensorData<T> output = { outputInfo, {} };

    // execute test
    TransposeConvolution2dTestImpl(workloadFactory,
                                   memoryManager,
                                   tensorHandleFactory,
                                   descriptor,
                                   input,
                                   output,
                                   weights,
                                   optionalBiases);

    // construct result object
    LayerTestResult<T, 4> testResult(outputInfo);
    testResult.m_ActualData = output.second;
    testResult.m_ExpectedData = armnnUtils::QuantizedVector<T>(expectedOutputData,
                                                               outputInfo.GetQuantizationScale(),
                                                               outputInfo.GetQuantizationOffset());

    return testResult;
}

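// Permutes reference data generated in NCHW order into NHWC so the same test
// vectors can be reused when a test runs with the NHWC data layout.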
template<typename T>
void SwizzleData(armnn::TensorInfo& inputInfo,
                 std::vector<T>& inputData,
                 armnn::TensorInfo& outputInfo,
                 std::vector<T>& outputData,
                 armnn::TensorInfo& weightsInfo,
                 std::vector<T>& weightsData)
{
    PermuteTensorNchwToNhwc<T>(inputInfo, inputData);
    PermuteTensorNchwToNhwc<T>(outputInfo, outputData);
    PermuteTensorNchwToNhwc<T>(weightsInfo, weightsData);
}

} // anonymous namespace

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> SimpleTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    constexpr unsigned int batches = 1u;
    constexpr unsigned int channels = 1u;

    constexpr unsigned int wInput = 3u;
    constexpr unsigned int hInput = wInput;

    constexpr unsigned int wOutput = 5u;
    constexpr unsigned int hOutput = wOutput;

    constexpr unsigned int wWeights = 3u;
    constexpr unsigned int hWeights = wWeights;

    TensorShape inputShape = { batches, channels, hInput, wInput };
    TensorShape outputShape = { batches, channels, hOutput, wOutput };
    TensorShape weightsShape = { batches, channels, hWeights, wWeights };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo({ channels }, ArmnnBType);

    std::vector<float> inputData =
    {
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f
    };

    std::vector<float> weightsData =
    {
        1.f, 2.f, 3.f,
        4.f, 5.f, 6.f,
        7.f, 8.f, 9.f
    };

    std::vector<float> biasesData = { 1.f };

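    // With a 3x3 kernel, stride 1 and no padding, each output dimension is
    // stride * (inputSize - 1) + kernelSize = 5; every expected value is the sum of the
    // kernel taps overlapping that position, and the bias is added below when enabled.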
    std::vector<float> expectedOutputData =
    {
        1.f, 3.f, 6.f, 5.f, 3.f,
        5.f, 12.f, 21.f, 16.f, 9.f,
        12.f, 27.f, 45.f, 33.f, 18.f,
        11.f, 24.f, 39.f, 28.f, 15.f,
        7.f, 15.f, 24.f, 17.f, 9.f
    };

    if (biasEnabled)
    {
        // apply bias to expected output data
        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
                       [&](float f) -> float { return f + biasesData[0]; });
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX = 1;
    descriptor.m_StrideY = 1;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout = layout;

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             tensorHandleFactory,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> PaddedTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    constexpr unsigned int batches = 1u;
    constexpr unsigned int channels = 1u;

    constexpr unsigned int wInput = 4u;
    constexpr unsigned int hInput = wInput;

    constexpr unsigned int wOutput = 2u;
    constexpr unsigned int hOutput = wOutput;

    constexpr unsigned int wWeights = 3u;
    constexpr unsigned int hWeights = wWeights;

    TensorShape inputShape = { batches, channels, hInput, wInput };
    TensorShape outputShape = { batches, channels, hOutput, wOutput };
    TensorShape weightsShape = { batches, channels, hWeights, wWeights };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo({ channels }, ArmnnBType);

    std::vector<float> inputData =
    {
        1.f, 3.f, 2.f, 1.f,
        1.f, 3.f, 3.f, 1.f,
        2.f, 1.f, 1.f, 3.f,
        3.f, 2.f, 3.f, 3.f
    };

    std::vector<float> weightsData =
    {
        1.f, 2.f, 3.f,
        0.f, 1.f, 0.f,
        2.f, 1.f, 2.f
    };

    std::vector<float> biasesData = { 1.f };

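    // Padding shrinks the transposed convolution output:
    // stride * (inputSize - 1) + kernelSize - (pad before + pad after) = 3 + 3 - 4 = 2 per dimension.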
    std::vector<float> expectedOutputData =
    {
        21.f, 21.f,
        28.f, 27.f
    };

    if (biasEnabled)
    {
        // apply bias to expected output data
        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
                       [&](float f) -> float { return f + biasesData[0]; });
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_PadLeft = 2;
    descriptor.m_PadRight = 2;
    descriptor.m_PadTop = 2;
    descriptor.m_PadBottom = 2;
    descriptor.m_StrideX = 1;
    descriptor.m_StrideY = 1;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout = layout;

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             tensorHandleFactory,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> StridedTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    constexpr unsigned int batches = 1u;
    constexpr unsigned int channels = 1u;

    constexpr unsigned int wInput = 3u;
    constexpr unsigned int hInput = wInput;

    constexpr unsigned int wOutput = 7u;
    constexpr unsigned int hOutput = wOutput;

    constexpr unsigned int wWeights = 3u;
    constexpr unsigned int hWeights = wWeights;

    TensorShape inputShape = { batches, channels, hInput, wInput };
    TensorShape outputShape = { batches, channels, hOutput, wOutput };
    TensorShape weightsShape = { batches, channels, hWeights, wWeights };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo({ channels }, ArmnnBType);

    std::vector<float> inputData =
    {
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f
    };

    std::vector<float> weightsData =
    {
        1.f, 2.f, 3.f,
        4.f, 5.f, 6.f,
        7.f, 8.f, 9.f
    };

    std::vector<float> biasesData = { 1.f };

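    // With stride 2 the 3x3 input is upsampled to stride * (inputSize - 1) + kernelSize = 7
    // in each spatial dimension.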
    std::vector<float> expectedOutputData =
    {
        1.f, 2.f, 4.f, 2.f, 4.f, 2.f, 3.f,
        4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
        8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
        4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
        8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
        4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
        7.f, 8.f, 16.f, 8.f, 16.f, 8.f, 9.f
    };

    if (biasEnabled)
    {
        // apply bias to expected output data
        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
                       [&](float f) -> float { return f + biasesData[0]; });
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout = layout;

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             tensorHandleFactory,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> MultiChannelTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    TensorShape inputShape = { 1, 1, 2, 2 };
    TensorShape outputShape = { 1, 2, 5, 5 };

    // OIHW for NCHW; OHWI for NHWC
    TensorShape weightsShape = { 2, 1, 3, 3 };
    TensorShape biasesShape = { 2 };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo(biasesShape, ArmnnBType);

    std::vector<float> inputData =
    {
        1.f, 2.f,
        3.f, 4.f,
    };

    std::vector<float> weightsData =
    {
        1.f, 3.f, 5.f,
        7.f, 9.f, 11.f,
        13.f, 15.f, 17.f,

        2.f, 4.f, 6.f,
        8.f, 10.f, 12.f,
        14.f, 16.f, 18.f
    };

    std::vector<float> biasesData = { -1.5f, -2.0f };

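    // Two output channels: each 5x5 block below comes from the corresponding 3x3 kernel,
    // with its per-channel bias (-1.5f or -2.0f) already folded into the expected values.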
    std::vector<float> expectedOutputData =
    {
        -0.5f, 1.5f, 5.5f, 4.5f, 8.5f,
        5.5f, 7.5f, 23.5f, 16.5f, 20.5f,
        14.5f, 22.5f, 60.5f, 40.5f, 52.5f,
        19.5f, 25.5f, 59.5f, 34.5f, 42.5f,
        37.5f, 43.5f, 101.5f, 58.5f, 66.5f,

        0.0f, 2.0f, 8.0f, 6.0f, 10.0f,
        6.0f, 8.0f, 26.0f, 18.0f, 22.0f,
        18.0f, 26.0f, 70.0f, 46.0f, 58.0f,
        22.0f, 28.0f, 66.0f, 38.0f, 46.0f,
        40.0f, 46.0f, 108.0f, 62.0f, 70.0f
    };

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout = layout;

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             tensorHandleFactory,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}

LayerTestResult<uint8_t, 4> TransposeConvolution2dPerAxisQuantTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    const DataType inputType = DataType::QAsymmU8;
    const DataType kernelType = DataType::QSymmS8;
    const DataType biasType = DataType::Signed32;

    TensorInfo inputInfo ({ 1, 1, 2, 2 }, inputType, 0.50f, 10);
    TensorInfo outputInfo({ 1, 2, 5, 5 }, inputType, 0.50f, 10);

    const std::vector<float> quantScales{ 0.25f, 0.5f };
    constexpr unsigned int quantDimension = 0;

    TensorInfo kernelInfo({ 2, 1, 3, 3 }, kernelType, quantScales, quantDimension);

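    // quantDimension 0 selects the output-channel axis of the kernel, so each of the two
    // kernels has its own scale; the per-channel bias scales below are
    // inputScale * kernelScale: 0.5 * 0.25 = 0.125 and 0.5 * 0.5 = 0.25.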
    const std::vector<float> biasQuantScales{ 0.125f, 0.25f };
    TensorInfo biasInfo({ 2 }, biasType, biasQuantScales, quantDimension);

    std::vector<uint8_t> inputData =
    {
        12, 14,
        16, 18
    };

    std::vector<int8_t> kernelData =
    {
        4, 12, 20,
        28, 36, 44,
        52, 60, 68,

        4, 8, 12,
        16, 20, 24,
        28, 32, 36
    };

    std::vector<int32_t> biasData = { -12, -8 };

    std::vector<uint8_t> actualOutput(outputInfo.GetNumElements());

    std::vector<uint8_t> expectedOutputData =
    {
        9, 13, 21, 19, 27,
        21, 25, 57, 43, 51,
        39, 55, 131, 91, 115,
        49, 61, 129, 79, 95,
        85, 97, 213, 127, 143,

        10, 14, 26, 22, 30,
        22, 26, 62, 46, 54,
        46, 62, 150, 102, 126,
        54, 66, 142, 86, 102,
        90, 102, 226, 134, 150
    };

    if (layout == DataLayout::NHWC)
    {
        PermuteTensorNchwToNhwc(inputInfo, inputData);
        PermuteTensorNchwToNhwc(kernelInfo, kernelData);
        PermuteTensorNchwToNhwc(outputInfo, expectedOutputData);
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout = layout;

    std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
    std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);

    WorkloadInfo workloadInfo;
    ScopedTensorHandle weightTensor(kernelInfo);
    ScopedTensorHandle biasTensor(biasInfo);

    AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
    AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());

    TransposeConvolution2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Weight = &weightTensor;
    queueDescriptor.m_Bias = &biasTensor;

    AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());

    std::unique_ptr<IWorkload> workload = workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<uint8_t, 4>(actualOutput,
                                       expectedOutputData,
                                       outputHandle->GetShape(),
                                       outputInfo.GetShape());
}

//
// Explicit template specializations
//

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout);