blob: a2b477cc2d7193a639415f5ee46b0bca70d8fbfe [file] [log] [blame]
Aron Virginas-Tar735a4502019-06-26 15:02:47 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
Aron Virginas-Tar735a4502019-06-26 15:02:47 +01005
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01006#pragma once
Aron Virginas-Tar735a4502019-06-26 15:02:47 +01007
8#include <armnn/ArmNN.hpp>
9
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010010#include <Permute.hpp>
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010011#include <QuantizeHelper.hpp>
Aron Virginas-Tar735a4502019-06-26 15:02:47 +010012#include <ResolveType.hpp>
13
14#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010015
Aron Virginas-Tarf97f6da2019-10-01 18:35:44 +010016#include <backendsCommon/test/DataLayoutUtils.hpp>
Aron Virginas-Tar735a4502019-06-26 15:02:47 +010017#include <backendsCommon/test/TensorCopyUtils.hpp>
18#include <backendsCommon/test/WorkloadTestUtils.hpp>
19
20#include <reference/RefWorkloadFactory.hpp>
21
22#include <boost/test/unit_test.hpp>
23
24#include <string>
25#include <utility>
26#include <vector>
27
28namespace
29{
30
// Pairs a tensor's metadata (shape, data type, quantization parameters) with
// the backing data buffer holding its element values.
template<typename T>
using TensorData = std::pair<armnn::TensorInfo, std::vector<T>>;
33
34template<typename T>
35void VerifyInputTensorData(const TensorData<T>& data, const std::string& tensorName)
36{
37 if (data.first.GetNumElements() > data.second.size())
38 {
39 throw armnn::InvalidArgumentException("Size of data too small for " + tensorName + ": expected " +
40 std::to_string(data.first.GetNumElements()) + "but got " + std::to_string(data.second.size()));
41 }
42}
43
44template<typename T, typename BT>
45void TransposeConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
46 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
47 const armnn::TransposeConvolution2dDescriptor& descriptor,
48 const TensorData<T>& input,
49 TensorData<T>& output,
50 const TensorData<T>& weights,
51 const armnn::Optional<TensorData<BT>>& biases)
52{
53 using namespace armnn;
54
55 VerifyInputTensorData(input, "input");
56 VerifyInputTensorData(weights, "biases");
57
58 if (descriptor.m_BiasEnabled)
59 {
60 if (!biases.has_value())
61 {
62 throw InvalidArgumentException("Bias enabled but no bias data provided");
63 }
64 VerifyInputTensorData(biases.value(), "biases");
65 }
66
67 // set up weights
68 ScopedCpuTensorHandle weightsTensor(weights.first);
69
70 TransposeConvolution2dQueueDescriptor queueDescriptor;
71 queueDescriptor.m_Parameters = descriptor;
72 queueDescriptor.m_Weight = &weightsTensor;
73
74 AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.second.data());
75
76 std::unique_ptr<ScopedCpuTensorHandle> biasesTensor;
77 if (descriptor.m_BiasEnabled)
78 {
79 // set up biases
80 biasesTensor = std::make_unique<ScopedCpuTensorHandle>(biases.value().first);
81 queueDescriptor.m_Bias = biasesTensor.get();
82
83 AllocateAndCopyDataToITensorHandle(biasesTensor.get(), biases.value().second.data());
84 }
85
86 // set up input and output handles
87 std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(input.first);
88 std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(output.first);
89
90 // set up workload
91 armnn::WorkloadInfo workloadInfo;
92 AddInputToWorkload(queueDescriptor, workloadInfo, input.first, inputHandle.get());
93 AddOutputToWorkload(queueDescriptor, workloadInfo, output.first, outputHandle.get());
94
95 std::unique_ptr<armnn::IWorkload> workload =
96 workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);
97
98 inputHandle->Allocate();
99 outputHandle->Allocate();
100
101 CopyDataToITensorHandle(inputHandle.get(), input.second.data());
102
Aron Virginas-Tarf800de22019-08-16 17:49:42 +0100103 ExecuteWorkload(*workload, memoryManager);
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100104
105 // copy output
106 output.second = std::vector<T>(output.first.GetNumElements(), 0.0f);
107 CopyDataFromITensorHandle(output.second.data(), outputHandle.get());
108}
109
110template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100111LayerTestResult<T, 4> TransposeConvolution2dTest(
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100112 armnn::IWorkloadFactory& workloadFactory,
113 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
114 const armnn::TransposeConvolution2dDescriptor& descriptor,
115 armnn::TensorInfo& inputInfo,
116 const std::vector<float>& inputData,
117 armnn::TensorInfo& outputInfo,
118 const std::vector<float>& expectedOutputData,
119 armnn::TensorInfo& weightsInfo,
120 const std::vector<float>& weightsData,
121 armnn::TensorInfo& biasesInfo,
122 const std::vector<float>& biasesData)
123{
124 using namespace armnn;
125
126 // set up quantization parameters
127 if (armnn::IsQuantizedType<T>())
128 {
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100129 constexpr float qScale = 0.50f;
130 constexpr int32_t qOffset = 10;
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100131
132 inputInfo.SetQuantizationScale(qScale);
133 inputInfo.SetQuantizationOffset(qOffset);
134
135 outputInfo.SetQuantizationScale(qScale);
136 outputInfo.SetQuantizationOffset(qOffset);
137
138 weightsInfo.SetQuantizationScale(qScale);
139 weightsInfo.SetQuantizationOffset(qOffset);
140
141 biasesInfo.SetQuantizationScale(qScale * qScale);
142 biasesInfo.SetQuantizationOffset(0);
143 }
144
145 // set up input
146 TensorData<T> input =
147 {
148 inputInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100149 armnnUtils::QuantizedVector<T>(inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset())
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100150 };
151
152 // set up weights
153 TensorData<T> weights =
154 {
155 weightsInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100156 armnnUtils::QuantizedVector<T>(weightsData,
157 weightsInfo.GetQuantizationScale(),
158 weightsInfo.GetQuantizationOffset())
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100159 };
160
161 // set up biases
162 using BT = armnn::ResolveType<ArmnnBType>;
163 Optional<TensorData<BT>> optionalBiases;
164 if (descriptor.m_BiasEnabled)
165 {
166 TensorData<BT> biases =
167 {
168 biasesInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100169 armnnUtils::QuantizedVector<BT>(biasesData,
170 biasesInfo.GetQuantizationScale(),
171 biasesInfo.GetQuantizationOffset())
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100172 };
173
174 optionalBiases = Optional<TensorData<BT>>(biases);
175 }
176
177 // set up output
178 TensorData<T> output = { outputInfo, {} };
179
180 // execute test
181 TransposeConvolution2dTestImpl(workloadFactory,
182 memoryManager,
183 descriptor,
184 input,
185 output,
186 weights,
187 optionalBiases);
188
189 // construct result object
190 LayerTestResult<T, 4> testResult(outputInfo);
191 testResult.output = MakeTensor<T, 4>(outputInfo, output.second);
192 testResult.outputExpected = MakeTensor<T, 4>(outputInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100193 armnnUtils::QuantizedVector<T>(expectedOutputData,
194 outputInfo.GetQuantizationScale(),
195 outputInfo.GetQuantizationOffset()));
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100196
197 return testResult;
198}
199
// Converts the input, output and weights tensors of a test case from NCHW to
// NHWC in place: each TensorInfo's shape is permuted and the matching data
// vector is reordered accordingly.
template<typename T>
void SwizzleData(armnn::TensorInfo& inputInfo,
                 std::vector<T>& inputData,
                 armnn::TensorInfo& outputInfo,
                 std::vector<T>& outputData,
                 armnn::TensorInfo& weightsInfo,
                 std::vector<T>& weightsData)
{
    PermuteTensorNchwToNhwc<T>(inputInfo, inputData);
    PermuteTensorNchwToNhwc<T>(outputInfo, outputData);
    PermuteTensorNchwToNhwc<T>(weightsInfo, weightsData);
}
212
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100213} // anonymous namespace
214
215template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100216LayerTestResult<T, 4> SimpleTransposeConvolution2dTest(
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100217 armnn::IWorkloadFactory& workloadFactory,
218 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
219 bool biasEnabled,
220 const armnn::DataLayout layout)
221{
222 using namespace armnn;
223
224 constexpr unsigned int batches = 1u;
225 constexpr unsigned int channels = 1u;
226
227 constexpr unsigned int wInput = 3u;
228 constexpr unsigned int hInput = wInput;
229
230 constexpr unsigned int wOutput = 5u;
231 constexpr unsigned int hOutput = wOutput;
232
233 constexpr unsigned int wWeights = 3u;
234 constexpr unsigned int hWeights = wWeights;
235
Aron Virginas-Tarf97f6da2019-10-01 18:35:44 +0100236 TensorShape inputShape = { batches, channels, hInput, wInput };
237 TensorShape outputShape = { batches, channels, hOutput, wOutput };
238 TensorShape weightsShape = { batches, channels, hWeights, wWeights };
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100239
240 TensorInfo inputInfo(inputShape, ArmnnType);
241 TensorInfo outputInfo(outputShape, ArmnnType);
242 TensorInfo weightsInfo(weightsShape, ArmnnType);
243 TensorInfo biasesInfo({ channels }, ArmnnBType);
244
245 std::vector<float> inputData =
246 {
247 1.f, 1.f, 1.f,
248 1.f, 1.f, 1.f,
249 1.f, 1.f, 1.f
250 };
251
252 std::vector<float> weightsData =
253 {
254 1.f, 2.f, 3.f,
255 4.f, 5.f, 6.f,
256 7.f, 8.f, 9.f
257 };
258
259 std::vector<float> biasesData = { 1.f };
260
261 std::vector<float> expectedOutputData =
262 {
263 1.f, 3.f, 6.f, 5.f, 3.f,
264 5.f, 12.f, 21.f, 16.f, 9.f,
265 12.f, 27.f, 45.f, 33.f, 18.f,
266 11.f, 24.f, 39.f, 28.f, 15.f,
267 7.f, 15.f, 24.f, 17.f, 9.f
268 };
269
270 if (biasEnabled)
271 {
272 // apply bias to expected output data
273 std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
274 [&](float f) -> float { return f + biasesData[0]; });
275 }
276
277 TransposeConvolution2dDescriptor descriptor;
278 descriptor.m_StrideX = 1;
279 descriptor.m_StrideY = 1;
280 descriptor.m_BiasEnabled = biasEnabled;
281 descriptor.m_DataLayout = layout;
282
283 // swizzle data if needed
284 if (layout == armnn::DataLayout::NHWC)
285 {
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100286 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100287 }
288
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100289 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
290 memoryManager,
291 descriptor,
292 inputInfo,
293 inputData,
294 outputInfo,
295 expectedOutputData,
296 weightsInfo,
297 weightsData,
298 biasesInfo,
299 biasesData);
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100300}
301
302template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100303LayerTestResult<T, 4> PaddedTransposeConvolution2dTest(
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100304 armnn::IWorkloadFactory& workloadFactory,
305 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
306 bool biasEnabled,
307 const armnn::DataLayout layout)
308{
309 using namespace armnn;
310
311 constexpr unsigned int batches = 1u;
312 constexpr unsigned int channels = 1u;
313
314 constexpr unsigned int wInput = 4u;
315 constexpr unsigned int hInput = wInput;
316
317 constexpr unsigned int wOutput = 2u;
318 constexpr unsigned int hOutput = wOutput;
319
320 constexpr unsigned int wWeights = 3u;
321 constexpr unsigned int hWeights = wWeights;
322
Aron Virginas-Tarf97f6da2019-10-01 18:35:44 +0100323 TensorShape inputShape = { batches, channels, hInput, wInput };
324 TensorShape outputShape = { batches, channels, hOutput, wOutput };
325 TensorShape weightsShape = { batches, channels, hWeights, wWeights };
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100326
327 TensorInfo inputInfo(inputShape, ArmnnType);
328 TensorInfo outputInfo(outputShape, ArmnnType);
329 TensorInfo weightsInfo(weightsShape, ArmnnType);
330 TensorInfo biasesInfo({ channels }, ArmnnBType);
331
332 std::vector<float> inputData =
333 {
334 1.f, 3.f, 2.f, 1.f,
335 1.f, 3.f, 3.f, 1.f,
336 2.f, 1.f, 1.f, 3.f,
337 3.f, 2.f, 3.f, 3.f
338 };
339
340 std::vector<float> weightsData =
341 {
342 1.f, 2.f, 3.f,
343 0.f, 1.f, 0.f,
344 2.f, 1.f, 2.f
345 };
346
347 std::vector<float> biasesData = { 1.f };
348
349 std::vector<float> expectedOutputData =
350 {
351 21.f, 21.f,
352 28.f, 27.f
353 };
354
355 if (biasEnabled)
356 {
357 // apply bias to expected output data
358 std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
359 [&](float f) -> float { return f + biasesData[0]; });
360 }
361
362 TransposeConvolution2dDescriptor descriptor;
363 descriptor.m_PadLeft = 2;
364 descriptor.m_PadRight = 2;
365 descriptor.m_PadTop = 2;
366 descriptor.m_PadBottom = 2;
367 descriptor.m_StrideX = 1;
368 descriptor.m_StrideY = 1;
369 descriptor.m_BiasEnabled = biasEnabled;
370 descriptor.m_DataLayout = layout;
371
372 // swizzle data if needed
373 if (layout == armnn::DataLayout::NHWC)
374 {
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100375 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100376 }
377
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100378 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
379 memoryManager,
380 descriptor,
381 inputInfo,
382 inputData,
383 outputInfo,
384 expectedOutputData,
385 weightsInfo,
386 weightsData,
387 biasesInfo,
388 biasesData);
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100389}
390
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100391template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
392LayerTestResult<T, 4> StridedTransposeConvolution2dTest(
393 armnn::IWorkloadFactory& workloadFactory,
394 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
395 bool biasEnabled,
396 const armnn::DataLayout layout)
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100397{
398 using namespace armnn;
399
400 constexpr unsigned int batches = 1u;
401 constexpr unsigned int channels = 1u;
402
403 constexpr unsigned int wInput = 3u;
404 constexpr unsigned int hInput = wInput;
405
406 constexpr unsigned int wOutput = 7u;
407 constexpr unsigned int hOutput = wOutput;
408
409 constexpr unsigned int wWeights = 3u;
410 constexpr unsigned int hWeights = wWeights;
411
Aron Virginas-Tarf97f6da2019-10-01 18:35:44 +0100412 TensorShape inputShape = { batches, channels, hInput, wInput };
413 TensorShape outputShape = { batches, channels, hOutput, wOutput };
414 TensorShape weightsShape = { batches, channels, hWeights, wWeights };
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100415
416 TensorInfo inputInfo(inputShape, ArmnnType);
417 TensorInfo outputInfo(outputShape, ArmnnType);
418 TensorInfo weightsInfo(weightsShape, ArmnnType);
419 TensorInfo biasesInfo({ channels }, ArmnnBType);
420
421 std::vector<float> inputData =
422 {
423 1.f, 1.f, 1.f,
424 1.f, 1.f, 1.f,
425 1.f, 1.f, 1.f
426 };
427
428 std::vector<float> weightsData =
429 {
430 1.f, 2.f, 3.f,
431 4.f, 5.f, 6.f,
432 7.f, 8.f, 9.f
433 };
434
435 std::vector<float> biasesData = { 1.f };
436
437 std::vector<float> expectedOutputData =
438 {
439 1.f, 2.f, 4.f, 2.f, 4.f, 2.f, 3.f,
440 4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
441 8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
442 4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
443 8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
444 4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
445 7.f, 8.f, 16.f, 8.f, 16.f, 8.f, 9.f
446 };
447
448 if (biasEnabled)
449 {
450 // apply bias to expected output data
451 std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
452 [&](float f) -> float { return f + biasesData[0]; });
453 }
454
455 TransposeConvolution2dDescriptor descriptor;
456 descriptor.m_StrideX = 2;
457 descriptor.m_StrideY = 2;
458 descriptor.m_BiasEnabled = biasEnabled;
459 descriptor.m_DataLayout = layout;
460
461 // swizzle data if needed
462 if (layout == armnn::DataLayout::NHWC)
463 {
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100464 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100465 }
466
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100467 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
468 memoryManager,
469 descriptor,
470 inputInfo,
471 inputData,
472 outputInfo,
473 expectedOutputData,
474 weightsInfo,
475 weightsData,
476 biasesInfo,
477 biasesData);
478}
479
480template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
481LayerTestResult<T, 4> MultiChannelTransposeConvolution2dTest(
482 armnn::IWorkloadFactory& workloadFactory,
483 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
484 const armnn::DataLayout layout)
485{
486 using namespace armnn;
487
Aron Virginas-Tarf97f6da2019-10-01 18:35:44 +0100488 TensorShape inputShape = { 1, 1, 2, 2 };
489 TensorShape outputShape = { 1, 2, 5, 5 };
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100490
Aron Virginas-Taraec942c2019-08-14 14:37:42 +0100491 // OIHW for NCHW; OHWI for NHWC
Aron Virginas-Tarf97f6da2019-10-01 18:35:44 +0100492 TensorShape weightsShape = { 2, 1, 3, 3 };
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100493 TensorShape biasesShape = { 2 };
494
495 TensorInfo inputInfo(inputShape, ArmnnType);
496 TensorInfo outputInfo(outputShape, ArmnnType);
497 TensorInfo weightsInfo(weightsShape, ArmnnType);
498 TensorInfo biasesInfo(biasesShape, ArmnnBType);
499
500 std::vector<float> inputData =
501 {
502 1.f, 2.f,
503 3.f, 4.f,
504 };
505
506 std::vector<float> weightsData =
507 {
508 1.f, 3.f, 5.f,
509 7.f, 9.f, 11.f,
510 13.f, 15.f, 17.f,
511
512 2.f, 4.f, 6.f,
513 8.f, 10.f, 12.f,
514 14.f, 16.f, 18.f
515 };
516
517 std::vector<float> biasesData = { -1.5f, -2.0f };
518
519 std::vector<float> expectedOutputData =
520 {
521 -0.5f, 1.5f, 5.5f, 4.5f, 8.5f,
522 5.5f, 7.5f, 23.5f, 16.5f, 20.5f,
523 14.5f, 22.5f, 60.5f, 40.5f, 52.5f,
524 19.5f, 25.5f, 59.5f, 34.5f, 42.5f,
525 37.5f, 43.5f, 101.5f, 58.5f, 66.5f,
526
527 0.0f, 2.0f, 8.0f, 6.0f, 10.0f,
528 6.0f, 8.0f, 26.0f, 18.0f, 22.0f,
529 18.0f, 26.0f, 70.0f, 46.0f, 58.0f,
530 22.0f, 28.0f, 66.0f, 38.0f, 46.0f,
531 40.0f, 46.0f, 108.0f, 62.0f, 70.0f,
532 };
533
534 TransposeConvolution2dDescriptor descriptor;
535 descriptor.m_StrideX = 2;
536 descriptor.m_StrideY = 2;
537 descriptor.m_BiasEnabled = true;
538 descriptor.m_DataLayout = layout;
539
540 // swizzle data if needed
541 if (layout == armnn::DataLayout::NHWC)
542 {
543 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
544 }
545
546 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
547 memoryManager,
548 descriptor,
549 inputInfo,
550 inputData,
551 outputInfo,
552 expectedOutputData,
553 weightsInfo,
554 weightsData,
555 biasesInfo,
556 biasesData);
557}