//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "PadTestImpl.hpp"

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

//
// Implementation templates
//

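// Pads a 3x3 input up to a 7x7 output, adding two elements before and after each
// dimension and filling the new elements with customPaddingValue.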
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> Pad2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    const float customPaddingValue)
{
    const armnn::TensorShape inputShape{ 3, 3 };
    const armnn::TensorShape outputShape{ 7, 7 };

    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues(
        QuantizedVector<T>(qScale, qOffset,
        {
            // Height (3) x Width (3)
            4, 8, 6,
            7, 4, 4,
            3, 2, 4
        }));

    auto p = customPaddingValue;
    std::vector<T> expectedOutputValues(
        QuantizedVector<T>(qScale, qOffset,
        {
            p, p, p, p, p, p, p,
            p, p, p, p, p, p, p,
            p, p, 4, 8, 6, p, p,
            p, p, 7, 4, 4, p, p,
            p, p, 3, 2, 4, p, p,
            p, p, p, p, p, p, p,
            p, p, p, p, p, p, p
        }));

    auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 2> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

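    // One (before, after) pair per dimension: pad two elements on each side of
    // both height and width, taking the 3x3 input to the 7x7 output.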
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));

    descriptor.m_Parameters.m_PadList = padList;
    descriptor.m_Parameters.m_PadValue = customPaddingValue;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}

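// Pads a 2x2x2 input up to a 3x5x6 output using asymmetric padding and the
// default pad value of zero.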
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 3> Pad3dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    const armnn::TensorShape inputShape{ 2, 2, 2 };
    const armnn::TensorShape outputShape{ 3, 5, 6 };

    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues(
        QuantizedVector<T>(qScale, qOffset,
        {
            // Channel 0, Height (2) x Width (2)
            0, 4,
            2, 5,

            // Channel 1, Height (2) x Width (2)
            6, 1,
            5, 2
        }));

    std::vector<T> expectedOutputValues(
        QuantizedVector<T>(qScale, qOffset,
        {
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 4, 0, 0,
            0, 0, 2, 5, 0, 0,
            0, 0, 0, 0, 0, 0,

            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 6, 1, 0, 0,
            0, 0, 5, 2, 0, 0,
            0, 0, 0, 0, 0, 0,

            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        }));

    auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 3> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

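    // (before, after) padding per dimension: (0,1), (2,1) and (2,2) take the
    // 2x2x2 input to the 3x5x6 output.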
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    padList.push_back(std::pair<unsigned int, unsigned int>(0,1));
    padList.push_back(std::pair<unsigned int, unsigned int>(2,1));
    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));

    descriptor.m_Parameters.m_PadList = padList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());

    return result;
}

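// Pads a 2x2x3x2 input up to a 4x5x7x4 output using asymmetric padding on every
// dimension and the default pad value of zero.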
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 4> Pad4dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
    const armnn::TensorShape outputShape{ 4, 5, 7, 4 };

    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues(
        QuantizedVector<T>(qScale, qOffset,
        {
            // Batch 0, Channel 0, Height (3) x Width (2)
            0, 1,
            2, 3,
            4, 5,

            // Batch 0, Channel 1, Height (3) x Width (2)
            6, 7,
            8, 9,
            10, 11,

            // Batch 1, Channel 0, Height (3) x Width (2)
            12, 13,
            14, 15,
            16, 17,

            // Batch 1, Channel 1, Height (3) x Width (2)
            18, 19,
            20, 21,
            22, 23
        }));

    std::vector<T> expectedOutputValues(
        QuantizedVector<T>(qScale, qOffset,
        {
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 1, 0,
            0, 2, 3, 0,
            0, 4, 5, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 6, 7, 0,
            0, 8, 9, 0,
            0, 10, 11, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 12, 13, 0,
            0, 14, 15, 0,
            0, 16, 17, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 18, 19, 0,
            0, 20, 21, 0,
            0, 22, 23, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0
        }));

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

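    // (before, after) padding per dimension: (1,1), (2,1), (3,1) and (1,1) take
    // the 2x2x3x2 input to the 4x5x7x4 output.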
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
    padList.push_back(std::pair<unsigned int, unsigned int>(2,1));
    padList.push_back(std::pair<unsigned int, unsigned int>(3,1));
    padList.push_back(std::pair<unsigned int, unsigned int>(1,1));

    descriptor.m_Parameters.m_PadList = padList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

//
// Explicit template specializations
//

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
Pad2dTestCommon<armnn::DataType::QuantisedSymm16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    const float customPaddingValue);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
Pad3dTestCommon<armnn::DataType::QuantisedSymm16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Pad4dTestCommon<armnn::DataType::QuantisedSymm16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset);

//
// Implementation functions
//

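// Concrete test entry points: the QuantisedAsymm8 tests run the templates with
// qScale = 1.0f and qOffset = 0, while the Float32 tests reuse the same
// templates with qScale = 0.0f and qOffset = 0.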
LayerTestResult<uint8_t, 2> PadUint82dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}

LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0, 1.0f);
}

LayerTestResult<uint8_t, 3> PadUint83dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> PadUint84dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}

LayerTestResult<float, 2> PadFloat322dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}

LayerTestResult<float, 2> PadFloat322dCustomPaddingTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, 1.0f);
}

LayerTestResult<float, 3> PadFloat323dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}

LayerTestResult<float, 4> PadFloat324dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}