//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
6
#include "QuantizeHelper.hpp"

#include <armnn/ArmNN.hpp>

#include <ResolveType.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/test/CommonTestUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <reference/RefWorkloadFactory.hpp>

#include <boost/test/unit_test.hpp>

#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
25
26namespace
27{
28
// Pairs a tensor's metadata (TensorInfo) with its element values.
template<typename T>
using TensorData = std::pair<armnn::TensorInfo, std::vector<T>>;
31
32template<typename T>
33void VerifyInputTensorData(const TensorData<T>& data, const std::string& tensorName)
34{
35 if (data.first.GetNumElements() > data.second.size())
36 {
37 throw armnn::InvalidArgumentException("Size of data too small for " + tensorName + ": expected " +
38 std::to_string(data.first.GetNumElements()) + "but got " + std::to_string(data.second.size()));
39 }
40}
41
42template<typename T, typename BT>
43void TransposeConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
44 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
45 const armnn::TransposeConvolution2dDescriptor& descriptor,
46 const TensorData<T>& input,
47 TensorData<T>& output,
48 const TensorData<T>& weights,
49 const armnn::Optional<TensorData<BT>>& biases)
50{
51 using namespace armnn;
52
53 VerifyInputTensorData(input, "input");
54 VerifyInputTensorData(weights, "biases");
55
56 if (descriptor.m_BiasEnabled)
57 {
58 if (!biases.has_value())
59 {
60 throw InvalidArgumentException("Bias enabled but no bias data provided");
61 }
62 VerifyInputTensorData(biases.value(), "biases");
63 }
64
65 // set up weights
66 ScopedCpuTensorHandle weightsTensor(weights.first);
67
68 TransposeConvolution2dQueueDescriptor queueDescriptor;
69 queueDescriptor.m_Parameters = descriptor;
70 queueDescriptor.m_Weight = &weightsTensor;
71
72 AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.second.data());
73
74 std::unique_ptr<ScopedCpuTensorHandle> biasesTensor;
75 if (descriptor.m_BiasEnabled)
76 {
77 // set up biases
78 biasesTensor = std::make_unique<ScopedCpuTensorHandle>(biases.value().first);
79 queueDescriptor.m_Bias = biasesTensor.get();
80
81 AllocateAndCopyDataToITensorHandle(biasesTensor.get(), biases.value().second.data());
82 }
83
84 // set up input and output handles
85 std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(input.first);
86 std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(output.first);
87
88 // set up workload
89 armnn::WorkloadInfo workloadInfo;
90 AddInputToWorkload(queueDescriptor, workloadInfo, input.first, inputHandle.get());
91 AddOutputToWorkload(queueDescriptor, workloadInfo, output.first, outputHandle.get());
92
93 std::unique_ptr<armnn::IWorkload> workload =
94 workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);
95
96 inputHandle->Allocate();
97 outputHandle->Allocate();
98
99 CopyDataToITensorHandle(inputHandle.get(), input.second.data());
100
Aron Virginas-Tarf800de22019-08-16 17:49:42 +0100101 ExecuteWorkload(*workload, memoryManager);
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100102
103 // copy output
104 output.second = std::vector<T>(output.first.GetNumElements(), 0.0f);
105 CopyDataFromITensorHandle(output.second.data(), outputHandle.get());
106}
107
// Quantizes float reference data, runs a transpose convolution workload via
// TransposeConvolution2dTestImpl and returns the actual vs. expected output
// packaged as a LayerTestResult.
//
// The TensorInfo parameters are taken by non-const reference because, for
// quantized element types, their quantization scale/offset are overwritten
// below BEFORE the data is quantized — the order of these steps matters.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> TransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TransposeConvolution2dDescriptor& descriptor,
    armnn::TensorInfo& inputInfo,
    const std::vector<float>& inputData,
    armnn::TensorInfo& outputInfo,
    const std::vector<float>& expectedOutputData,
    armnn::TensorInfo& weightsInfo,
    const std::vector<float>& weightsData,
    armnn::TensorInfo& biasesInfo,
    const std::vector<float>& biasesData)
{
    using namespace armnn;

    // set up quantization parameters (must happen before QuantizedVector calls)
    if (armnn::IsQuantizedType<T>())
    {
        constexpr float qScale = 0.50f;
        constexpr int32_t qOffset = 10;

        inputInfo.SetQuantizationScale(qScale);
        inputInfo.SetQuantizationOffset(qOffset);

        outputInfo.SetQuantizationScale(qScale);
        outputInfo.SetQuantizationOffset(qOffset);

        weightsInfo.SetQuantizationScale(qScale);
        weightsInfo.SetQuantizationOffset(qOffset);

        // bias scale = input scale * weights scale (both qScale here), zero offset
        biasesInfo.SetQuantizationScale(qScale * qScale);
        biasesInfo.SetQuantizationOffset(0);
    }

    // set up input
    TensorData<T> input =
    {
        inputInfo,
        QuantizedVector<T>(inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(), inputData)
    };

    // set up weights
    TensorData<T> weights =
    {
        weightsInfo,
        QuantizedVector<T>(weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset(), weightsData)
    };

    // set up biases (only when the descriptor asks for them)
    using BT = armnn::ResolveType<ArmnnBType>;
    Optional<TensorData<BT>> optionalBiases;
    if (descriptor.m_BiasEnabled)
    {
        TensorData<BT> biases =
        {
            biasesInfo,
            QuantizedVector<BT>(biasesInfo.GetQuantizationScale(), biasesInfo.GetQuantizationOffset(), biasesData)
        };

        optionalBiases = Optional<TensorData<BT>>(biases);
    }

    // set up output (data vector is filled in by the impl)
    TensorData<T> output = { outputInfo, {} };

    // execute test
    TransposeConvolution2dTestImpl(workloadFactory,
                                   memoryManager,
                                   descriptor,
                                   input,
                                   output,
                                   weights,
                                   optionalBiases);

    // construct result object: actual output plus quantized expected output
    LayerTestResult<T, 4> testResult(outputInfo);
    testResult.output = MakeTensor<T, 4>(outputInfo, output.second);
    testResult.outputExpected = MakeTensor<T, 4>(outputInfo,
                                                 QuantizedVector<T>(outputInfo.GetQuantizationScale(),
                                                                    outputInfo.GetQuantizationOffset(),
                                                                    expectedOutputData));

    return testResult;
}
193
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100194template<typename T>
195void SwizzleData(const armnn::TensorInfo& inputInfo,
196 std::vector<T>& inputData,
197 const armnn::TensorInfo& outputInfo,
198 std::vector<T>& outputData,
199 const armnn::TensorInfo& weightsInfo,
200 std::vector<T>& weightsData)
201{
202 constexpr size_t dataTypeSize = sizeof(float);
203 const armnn::PermutationVector nchwToNhwc = { 0, 3, 1, 2 };
204
205 std::vector<T> tmp(inputData.size());
206 armnnUtils::Permute(inputInfo.GetShape(), nchwToNhwc, inputData.data(), tmp.data(), dataTypeSize);
207 inputData = tmp;
208
209 tmp.resize(weightsData.size());
210 armnnUtils::Permute(weightsInfo.GetShape(), nchwToNhwc, weightsData.data(), tmp.data(), dataTypeSize);
211 weightsData = tmp;
212
213 tmp.resize(outputData.size());
214 armnnUtils::Permute(outputInfo.GetShape(), nchwToNhwc, outputData.data(), tmp.data(), dataTypeSize);
215 outputData = tmp;
216}
217
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100218} // anonymous namespace
219
220template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100221LayerTestResult<T, 4> SimpleTransposeConvolution2dTest(
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100222 armnn::IWorkloadFactory& workloadFactory,
223 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
224 bool biasEnabled,
225 const armnn::DataLayout layout)
226{
227 using namespace armnn;
228
229 constexpr unsigned int batches = 1u;
230 constexpr unsigned int channels = 1u;
231
232 constexpr unsigned int wInput = 3u;
233 constexpr unsigned int hInput = wInput;
234
235 constexpr unsigned int wOutput = 5u;
236 constexpr unsigned int hOutput = wOutput;
237
238 constexpr unsigned int wWeights = 3u;
239 constexpr unsigned int hWeights = wWeights;
240
241 TensorShape inputShape = MakeTensorShape(batches, channels, hInput, wInput, layout);
242 TensorShape outputShape = MakeTensorShape(batches, channels, hOutput, wOutput, layout);
243 TensorShape weightsShape = MakeTensorShape(batches, channels, hWeights, wWeights, layout);
244
245 TensorInfo inputInfo(inputShape, ArmnnType);
246 TensorInfo outputInfo(outputShape, ArmnnType);
247 TensorInfo weightsInfo(weightsShape, ArmnnType);
248 TensorInfo biasesInfo({ channels }, ArmnnBType);
249
250 std::vector<float> inputData =
251 {
252 1.f, 1.f, 1.f,
253 1.f, 1.f, 1.f,
254 1.f, 1.f, 1.f
255 };
256
257 std::vector<float> weightsData =
258 {
259 1.f, 2.f, 3.f,
260 4.f, 5.f, 6.f,
261 7.f, 8.f, 9.f
262 };
263
264 std::vector<float> biasesData = { 1.f };
265
266 std::vector<float> expectedOutputData =
267 {
268 1.f, 3.f, 6.f, 5.f, 3.f,
269 5.f, 12.f, 21.f, 16.f, 9.f,
270 12.f, 27.f, 45.f, 33.f, 18.f,
271 11.f, 24.f, 39.f, 28.f, 15.f,
272 7.f, 15.f, 24.f, 17.f, 9.f
273 };
274
275 if (biasEnabled)
276 {
277 // apply bias to expected output data
278 std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
279 [&](float f) -> float { return f + biasesData[0]; });
280 }
281
282 TransposeConvolution2dDescriptor descriptor;
283 descriptor.m_StrideX = 1;
284 descriptor.m_StrideY = 1;
285 descriptor.m_BiasEnabled = biasEnabled;
286 descriptor.m_DataLayout = layout;
287
288 // swizzle data if needed
289 if (layout == armnn::DataLayout::NHWC)
290 {
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100291 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100292 }
293
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100294 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
295 memoryManager,
296 descriptor,
297 inputInfo,
298 inputData,
299 outputInfo,
300 expectedOutputData,
301 weightsInfo,
302 weightsData,
303 biasesInfo,
304 biasesData);
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100305}
306
307template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100308LayerTestResult<T, 4> PaddedTransposeConvolution2dTest(
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100309 armnn::IWorkloadFactory& workloadFactory,
310 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
311 bool biasEnabled,
312 const armnn::DataLayout layout)
313{
314 using namespace armnn;
315
316 constexpr unsigned int batches = 1u;
317 constexpr unsigned int channels = 1u;
318
319 constexpr unsigned int wInput = 4u;
320 constexpr unsigned int hInput = wInput;
321
322 constexpr unsigned int wOutput = 2u;
323 constexpr unsigned int hOutput = wOutput;
324
325 constexpr unsigned int wWeights = 3u;
326 constexpr unsigned int hWeights = wWeights;
327
328 TensorShape inputShape = MakeTensorShape(batches, channels, hInput, wInput, layout);
329 TensorShape outputShape = MakeTensorShape(batches, channels, hOutput, wOutput, layout);
330 TensorShape weightsShape = MakeTensorShape(batches, channels, hWeights, wWeights, layout);
331
332 TensorInfo inputInfo(inputShape, ArmnnType);
333 TensorInfo outputInfo(outputShape, ArmnnType);
334 TensorInfo weightsInfo(weightsShape, ArmnnType);
335 TensorInfo biasesInfo({ channels }, ArmnnBType);
336
337 std::vector<float> inputData =
338 {
339 1.f, 3.f, 2.f, 1.f,
340 1.f, 3.f, 3.f, 1.f,
341 2.f, 1.f, 1.f, 3.f,
342 3.f, 2.f, 3.f, 3.f
343 };
344
345 std::vector<float> weightsData =
346 {
347 1.f, 2.f, 3.f,
348 0.f, 1.f, 0.f,
349 2.f, 1.f, 2.f
350 };
351
352 std::vector<float> biasesData = { 1.f };
353
354 std::vector<float> expectedOutputData =
355 {
356 21.f, 21.f,
357 28.f, 27.f
358 };
359
360 if (biasEnabled)
361 {
362 // apply bias to expected output data
363 std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
364 [&](float f) -> float { return f + biasesData[0]; });
365 }
366
367 TransposeConvolution2dDescriptor descriptor;
368 descriptor.m_PadLeft = 2;
369 descriptor.m_PadRight = 2;
370 descriptor.m_PadTop = 2;
371 descriptor.m_PadBottom = 2;
372 descriptor.m_StrideX = 1;
373 descriptor.m_StrideY = 1;
374 descriptor.m_BiasEnabled = biasEnabled;
375 descriptor.m_DataLayout = layout;
376
377 // swizzle data if needed
378 if (layout == armnn::DataLayout::NHWC)
379 {
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100380 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100381 }
382
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100383 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
384 memoryManager,
385 descriptor,
386 inputInfo,
387 inputData,
388 outputInfo,
389 expectedOutputData,
390 weightsInfo,
391 weightsData,
392 biasesInfo,
393 biasesData);
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100394}
395
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100396template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
397LayerTestResult<T, 4> StridedTransposeConvolution2dTest(
398 armnn::IWorkloadFactory& workloadFactory,
399 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
400 bool biasEnabled,
401 const armnn::DataLayout layout)
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100402{
403 using namespace armnn;
404
405 constexpr unsigned int batches = 1u;
406 constexpr unsigned int channels = 1u;
407
408 constexpr unsigned int wInput = 3u;
409 constexpr unsigned int hInput = wInput;
410
411 constexpr unsigned int wOutput = 7u;
412 constexpr unsigned int hOutput = wOutput;
413
414 constexpr unsigned int wWeights = 3u;
415 constexpr unsigned int hWeights = wWeights;
416
417 TensorShape inputShape = MakeTensorShape(batches, channels, hInput, wInput, layout);
418 TensorShape outputShape = MakeTensorShape(batches, channels, hOutput, wOutput, layout);
419 TensorShape weightsShape = MakeTensorShape(batches, channels, hWeights, wWeights, layout);
420
421 TensorInfo inputInfo(inputShape, ArmnnType);
422 TensorInfo outputInfo(outputShape, ArmnnType);
423 TensorInfo weightsInfo(weightsShape, ArmnnType);
424 TensorInfo biasesInfo({ channels }, ArmnnBType);
425
426 std::vector<float> inputData =
427 {
428 1.f, 1.f, 1.f,
429 1.f, 1.f, 1.f,
430 1.f, 1.f, 1.f
431 };
432
433 std::vector<float> weightsData =
434 {
435 1.f, 2.f, 3.f,
436 4.f, 5.f, 6.f,
437 7.f, 8.f, 9.f
438 };
439
440 std::vector<float> biasesData = { 1.f };
441
442 std::vector<float> expectedOutputData =
443 {
444 1.f, 2.f, 4.f, 2.f, 4.f, 2.f, 3.f,
445 4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
446 8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
447 4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
448 8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
449 4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
450 7.f, 8.f, 16.f, 8.f, 16.f, 8.f, 9.f
451 };
452
453 if (biasEnabled)
454 {
455 // apply bias to expected output data
456 std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
457 [&](float f) -> float { return f + biasesData[0]; });
458 }
459
460 TransposeConvolution2dDescriptor descriptor;
461 descriptor.m_StrideX = 2;
462 descriptor.m_StrideY = 2;
463 descriptor.m_BiasEnabled = biasEnabled;
464 descriptor.m_DataLayout = layout;
465
466 // swizzle data if needed
467 if (layout == armnn::DataLayout::NHWC)
468 {
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100469 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
Aron Virginas-Tar735a4502019-06-26 15:02:47 +0100470 }
471
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100472 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
473 memoryManager,
474 descriptor,
475 inputInfo,
476 inputData,
477 outputInfo,
478 expectedOutputData,
479 weightsInfo,
480 weightsData,
481 biasesInfo,
482 biasesData);
483}
484
485template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
486LayerTestResult<T, 4> MultiChannelTransposeConvolution2dTest(
487 armnn::IWorkloadFactory& workloadFactory,
488 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
489 const armnn::DataLayout layout)
490{
491 using namespace armnn;
492
493 TensorShape inputShape = MakeTensorShape(1, 1, 2, 2, layout);
494 TensorShape outputShape = MakeTensorShape(1, 2, 5, 5, layout);
495
Aron Virginas-Taraec942c2019-08-14 14:37:42 +0100496 // OIHW for NCHW; OHWI for NHWC
497 TensorShape weightsShape = MakeTensorShape(2, 1, 3, 3, layout);
Aron Virginas-Tard8edabb2019-08-12 14:29:59 +0100498 TensorShape biasesShape = { 2 };
499
500 TensorInfo inputInfo(inputShape, ArmnnType);
501 TensorInfo outputInfo(outputShape, ArmnnType);
502 TensorInfo weightsInfo(weightsShape, ArmnnType);
503 TensorInfo biasesInfo(biasesShape, ArmnnBType);
504
505 std::vector<float> inputData =
506 {
507 1.f, 2.f,
508 3.f, 4.f,
509 };
510
511 std::vector<float> weightsData =
512 {
513 1.f, 3.f, 5.f,
514 7.f, 9.f, 11.f,
515 13.f, 15.f, 17.f,
516
517 2.f, 4.f, 6.f,
518 8.f, 10.f, 12.f,
519 14.f, 16.f, 18.f
520 };
521
522 std::vector<float> biasesData = { -1.5f, -2.0f };
523
524 std::vector<float> expectedOutputData =
525 {
526 -0.5f, 1.5f, 5.5f, 4.5f, 8.5f,
527 5.5f, 7.5f, 23.5f, 16.5f, 20.5f,
528 14.5f, 22.5f, 60.5f, 40.5f, 52.5f,
529 19.5f, 25.5f, 59.5f, 34.5f, 42.5f,
530 37.5f, 43.5f, 101.5f, 58.5f, 66.5f,
531
532 0.0f, 2.0f, 8.0f, 6.0f, 10.0f,
533 6.0f, 8.0f, 26.0f, 18.0f, 22.0f,
534 18.0f, 26.0f, 70.0f, 46.0f, 58.0f,
535 22.0f, 28.0f, 66.0f, 38.0f, 46.0f,
536 40.0f, 46.0f, 108.0f, 62.0f, 70.0f,
537 };
538
539 TransposeConvolution2dDescriptor descriptor;
540 descriptor.m_StrideX = 2;
541 descriptor.m_StrideY = 2;
542 descriptor.m_BiasEnabled = true;
543 descriptor.m_DataLayout = layout;
544
545 // swizzle data if needed
546 if (layout == armnn::DataLayout::NHWC)
547 {
548 SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
549 }
550
551 return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
552 memoryManager,
553 descriptor,
554 inputInfo,
555 inputData,
556 outputInfo,
557 expectedOutputData,
558 weightsInfo,
559 weightsData,
560 biasesInfo,
561 biasesData);
562}