//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "LayerTestResult.hpp"

#include <Permute.hpp>
#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>
#include <TensorUtils.hpp>

#include <armnn/ArmNN.hpp>

#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

//
// ResizeBilinear
//

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ResizeBilinearNopTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
            ? armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
            : armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);

    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
            ? armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
            : armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.5f);
        inputTensorInfo.SetQuantizationOffset(-3);
        outputTensorInfo.SetQuantizationScale(1.5f);
        outputTensorInfo.SetQuantizationOffset(-3);
    }
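
    // Note: for quantized variants the data below is written as float and converted by
    // armnnUtils::QuantizedVector, which maps each value v to (roughly) round(v / scale) + offset;
    // with scale 1.5 and offset -3, e.g. 3.0f becomes round(3.0f / 1.5f) - 3 = -1.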

    std::vector<float> inputData = armnn::IsQuantizedType<T>()
            ? std::initializer_list<float>
              {
                  1, 2, 3, 4,
                  2, 3, 4, 5,
                  3, 4, 5, 6,
                  4, 5, 6, 7
              }
            : std::initializer_list<float>
              {
                  1.0f, 2.0f, 3.0f, 4.0f,
                  2.0f, 3.0f, 4.0f, 5.0f,
                  3.0f, 4.0f, 5.0f, 6.0f,
                  4.0f, 5.0f, 6.0f, 7.0f,

                  1.0f, 2.0f, 3.0f, 4.0f,
                  2.0f, 3.0f, 4.0f, 5.0f,
                  3.0f, 4.0f, 5.0f, 6.0f,
                  4.0f, 5.0f, 6.0f, 7.0f
              };

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  armnnUtils::QuantizedVector<T>(inputData,
                                                                 inputTensorInfo.GetQuantizationScale(),
                                                                 inputTensorInfo.GetQuantizationOffset()));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = input;

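    // The remainder follows the common workload-test pattern: create tensor handles,
    // describe the resize workload, bind inputs and outputs, allocate, copy data in,
    // execute, then copy the computed output back out for comparison.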
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Method = armnn::ResizeMethod::Bilinear;
    descriptor.m_Parameters.m_DataLayout = dataLayout;

    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleResizeBilinearTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
            ? armnnUtils::GetTensorInfo(1, 1, 2, 2, dataLayout, ArmnnType)
            : armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);

    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
            ? armnnUtils::GetTensorInfo(1, 1, 1, 1, dataLayout, ArmnnType)
            : armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(0.1567f);
        inputTensorInfo.SetQuantizationOffset(1);
        outputTensorInfo.SetQuantizationScale(0.1567f);
        outputTensorInfo.SetQuantizationOffset(1);
    }

    std::vector<float> inputData = armnn::IsQuantizedType<T>()
            ? std::initializer_list<float>
              {
                  1, 255,
                  200, 250
              }
            : std::initializer_list<float>
              {
                  1.0f, 255.0f,
                  200.0f, 250.0f,

                  250.0f, 200.0f,
                  250.0f, 1.0f
              };

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel. Thus, for an input matrix of 2x2, we expect the output 1x1 matrix to contain, as
    // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
    // which we would expect if projecting the centre).
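    //
    // A short worked check of that projection for this 2x2 -> 1x1 case: the scale factor is
    // inputSize / outputSize = 2, so output (0,0) projects to input (0 * 2, 0 * 2) = (0,0)
    // exactly and no interpolation weights are involved; the expected output is simply 1.0f.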

    std::vector<float> outputData = armnn::IsQuantizedType<T>()
            ? std::initializer_list<float>
              {
                  1
              }
            : std::initializer_list<float>
              {
                  1.0f,

                  250.0f
              };

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;

        std::vector<float> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  armnnUtils::QuantizedVector<T>(inputData,
                                                                 inputTensorInfo.GetQuantizationScale(),
                                                                 inputTensorInfo.GetQuantizationOffset()));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                             armnnUtils::QuantizedVector<T>(outputData,
                                                                            outputTensorInfo.GetQuantizationScale(),
                                                                            outputTensorInfo.GetQuantizationOffset()));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Method = armnn::ResizeMethod::Bilinear;
    descriptor.m_Parameters.m_DataLayout = dataLayout;

    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ResizeBilinearSqMinTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
            ? armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
            : armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);

    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
            ? armnnUtils::GetTensorInfo(1, 1, 2, 2, dataLayout, ArmnnType)
            : armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(3.141592f);
        inputTensorInfo.SetQuantizationOffset(3);
        outputTensorInfo.SetQuantizationScale(3.141592f);
        outputTensorInfo.SetQuantizationOffset(3);
    }

    std::vector<float> inputData = armnn::IsQuantizedType<T>()
            ? std::initializer_list<float>
              {
                  1, 2, 3, 4,
                  2, 3, 4, 5,
                  3, 4, 5, 6,
                  4, 5, 6, 7
              }
            : std::initializer_list<float>
              {
                  1.0f, 2.0f, 3.0f, 4.0f,
                  2.0f, 3.0f, 4.0f, 5.0f,
                  3.0f, 4.0f, 5.0f, 6.0f,
                  4.0f, 5.0f, 6.0f, 7.0f,

                  7.0f, 6.0f, 5.0f, 4.0f,
                  6.0f, 5.0f, 4.0f, 3.0f,
                  5.0f, 4.0f, 3.0f, 2.0f,
                  4.0f, 3.0f, 2.0f, 1.0f
              };

    std::vector<float> outputData = armnn::IsQuantizedType<T>()
            ? std::initializer_list<float>
              {
                  1, 3,
                  3, 5
              }
            : std::initializer_list<float>
              {
                  1.0f, 3.0f,
                  3.0f, 5.0f,

                  7.0f, 5.0f,
                  5.0f, 3.0f
              };

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;

        std::vector<float> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  armnnUtils::QuantizedVector<T>(inputData,
                                                                 inputTensorInfo.GetQuantizationScale(),
                                                                 inputTensorInfo.GetQuantizationOffset()));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                             armnnUtils::QuantizedVector<T>(outputData,
                                                                            outputTensorInfo.GetQuantizationScale(),
                                                                            outputTensorInfo.GetQuantizationOffset()));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Method = armnn::ResizeMethod::Bilinear;
    descriptor.m_Parameters.m_DataLayout = dataLayout;

    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ResizeBilinearMinTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
            ? armnnUtils::GetTensorInfo(1, 1, 2, 3, dataLayout, ArmnnType)
            : armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, ArmnnType);

    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
            ? armnnUtils::GetTensorInfo(1, 1, 1, 2, dataLayout, ArmnnType)
            : armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.5f);
        inputTensorInfo.SetQuantizationOffset(-1);
        outputTensorInfo.SetQuantizationScale(1.5f);
        outputTensorInfo.SetQuantizationOffset(-1);
    }

    std::vector<float> inputData = armnn::IsQuantizedType<T>()
            ? std::initializer_list<float>
              {
                  3.0f, 4.5f, 6.0f, // 1, 2, 3 : expected quantised values
                  9.0f, 13.5f, 21.0f // 5, 8, 13
              }
            : std::initializer_list<float>
              {
                  1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
                  13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
                  144.0f, 233.0f, 377.0f, 610.0f, 987.0f,

                  987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
                  89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
                  8.0f, 5.0f, 3.0f, 2.0f, 1.0f
              };
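
    // The quantised annotations above follow q = round(v / scale) + offset: with scale 1.5
    // and offset -1, 3.0f -> round(3.0f / 1.5f) - 1 = 1 and 13.5f -> round(13.5f / 1.5f) - 1 = 8.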

    std::vector<float> outputData = armnn::IsQuantizedType<T>()
            ? std::initializer_list<float>
              {
                  3.0f, 5.25f // 1, 3
              }
            : std::initializer_list<float>
              {
                  1.0f, 2.6666f, 6.00f,
                  78.5f, 179.3333f, 401.00f,

                  987.0f, 454.6670f, 203.33f,
                  48.5f, 22.3333f, 10.00f
              };
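
    // Worked example for the float path (3x5 -> 2x3, so scaleW = 5/3 and scaleH = 3/2):
    // output (0,1) projects to srcX = 5/3 ~ 1.667, srcY = 0, giving
    // 0.333 * 2.0f + 0.667 * 3.0f ~ 2.6666f; output (1,0) projects to srcY = 1.5,
    // giving 0.5 * 13.0f + 0.5 * 144.0f = 78.5f.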

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;

        std::vector<float> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  armnnUtils::QuantizedVector<T>(inputData,
                                                                 inputTensorInfo.GetQuantizationScale(),
                                                                 inputTensorInfo.GetQuantizationOffset()));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                             armnnUtils::QuantizedVector<T>(outputData,
                                                                            outputTensorInfo.GetQuantizationScale(),
                                                                            outputTensorInfo.GetQuantizationOffset()));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Method = armnn::ResizeMethod::Bilinear;
    descriptor.m_Parameters.m_DataLayout = dataLayout;

    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ResizeBilinearMagTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
            ? armnnUtils::GetTensorInfo(1, 1, 3, 2, dataLayout, ArmnnType)
            : armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, ArmnnType);

    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
            ? armnnUtils::GetTensorInfo(1, 1, 3, 5, dataLayout, ArmnnType)
            : armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(0.010765f);
        inputTensorInfo.SetQuantizationOffset(7);
        outputTensorInfo.SetQuantizationScale(0.010132f);
        outputTensorInfo.SetQuantizationOffset(-18);
    }
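
    // Unlike the tests above, the input and output quantization parameters differ here,
    // so the quantized variants also exercise requantization from the input scale/offset
    // to the output scale/offset.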

    std::vector<float> inputData = armnn::IsQuantizedType<T>()
            ? std::initializer_list<float>
              {
                  0.183005f, 2.379065f, // 24, 228 : expected quantised values
                  1.054970f, 1.302565f, // 105, 128,
                  2.400595f, 0.688960f // 230, 71
              }
            : std::initializer_list<float>
              {
                  1.0f, 2.0f,
                  13.0f, 21.0f,
                  144.0f, 233.0f,

                  233.0f, 144.0f,
                  21.0f, 13.0f,
                  2.0f, 1.0f
              };

    std::vector<float> outputData = armnn::IsQuantizedType<T>()
            ? std::initializer_list<float>
              {
                  0.18300501f, 1.06142902f, 1.93985295f, 2.37906504f, 2.37906504f,
                  1.05497003f, 1.15400803f, 1.25304604f, 1.30256498f, 1.30256498f,
                  2.40059495f, 1.71594095f, 1.03128707f, 0.68896002f, 0.68896002f
                  // 0, 87, 173, 217, 217 : expected quantised values
                  // 86, 96, 106, 111, 111,
                  // 219, 151, 84, 50, 50
              }
            : std::initializer_list<float>
              {
                  1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
                  13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
                  144.0f, 179.6f, 215.2f, 233.0f, 233.0f,

                  233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
                  21.0f, 17.8f, 14.6f, 13.0f, 13.0f,
                  2.0f, 1.6f, 1.2f, 1.0f, 1.0f
              };
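
    // Worked example for the float path (width 2 -> 5, so scaleW = 2/5 = 0.4):
    // output x = 1 projects to srcX = 0.4, giving 0.6 * 1.0f + 0.4 * 2.0f = 1.4f, while
    // output x = 3 projects to srcX = 1.2, whose right-hand neighbour clamps to the last
    // input column, so the value saturates at 2.0f.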

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;

        std::vector<float> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  armnnUtils::QuantizedVector<T>(inputData,
                                                                 inputTensorInfo.GetQuantizationScale(),
                                                                 inputTensorInfo.GetQuantizationOffset()));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                             armnnUtils::QuantizedVector<T>(outputData,
                                                                            outputTensorInfo.GetQuantizationScale(),
                                                                            outputTensorInfo.GetQuantizationOffset()));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Method = armnn::ResizeMethod::Bilinear;
    descriptor.m_Parameters.m_DataLayout = dataLayout;

    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

//
// ResizeNearestNeighbor
//

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ResizeNearestNeighborNopTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
            ? armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
            : armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);

    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
            ? armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
            : armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.5f);
        inputTensorInfo.SetQuantizationOffset(-3);
        outputTensorInfo.SetQuantizationScale(1.5f);
        outputTensorInfo.SetQuantizationOffset(-3);
    }

    std::vector<float> inputData = armnn::IsQuantizedType<T>()
            ? std::initializer_list<float>
              {
                  1, 2, 3, 4,
                  2, 3, 4, 5,
                  3, 4, 5, 6,
                  4, 5, 6, 7
              }
            : std::initializer_list<float>
              {
                  1.0f, 2.0f, 3.0f, 4.0f,
                  2.0f, 3.0f, 4.0f, 5.0f,
                  3.0f, 4.0f, 5.0f, 6.0f,
                  4.0f, 5.0f, 6.0f, 7.0f,

                  1.0f, 2.0f, 3.0f, 4.0f,
                  2.0f, 3.0f, 4.0f, 5.0f,
                  3.0f, 4.0f, 5.0f, 6.0f,
                  4.0f, 5.0f, 6.0f, 7.0f
              };

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  armnnUtils::QuantizedVector<T>(inputData,
                                                                 inputTensorInfo.GetQuantizationScale(),
                                                                 inputTensorInfo.GetQuantizationOffset()));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleResizeNearestNeighborTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
            ? armnnUtils::GetTensorInfo(1, 1, 2, 2, dataLayout, ArmnnType)
            : armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);

    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
            ? armnnUtils::GetTensorInfo(1, 1, 1, 1, dataLayout, ArmnnType)
            : armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(0.1567f);
        inputTensorInfo.SetQuantizationOffset(1);
        outputTensorInfo.SetQuantizationScale(0.1567f);
        outputTensorInfo.SetQuantizationOffset(1);
    }

    std::vector<float> inputData = armnn::IsQuantizedType<T>()
            ? std::initializer_list<float>
              {
                  1, 255,
                  200, 250
              }
            : std::initializer_list<float>
              {
                  1.0f, 255.0f,
                  200.0f, 250.0f,

                  250.0f, 200.0f,
                  250.0f, 1.0f
              };

    // The 'resize' operation projects the top-left corner of output texels into the input image;
    // for nearest neighbour it then takes the input texel that the projection lands on, rather than
    // computing interpolation weights. Thus, for an input matrix of 2x2, we expect the output 1x1
    // matrix to contain, as its single element, the value that was at position (0,0) of the input
    // matrix (rather than an average, which we would expect if projecting the centre).

    std::vector<float> outputData = armnn::IsQuantizedType<T>()
            ? std::initializer_list<float>
              {
                  1
              }
            : std::initializer_list<float>
              {
                  1.0f,

                  250.0f
              };

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;

        std::vector<float> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  armnnUtils::QuantizedVector<T>(inputData,
                                                                 inputTensorInfo.GetQuantizationScale(),
                                                                 inputTensorInfo.GetQuantizationOffset()));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                             armnnUtils::QuantizedVector<T>(outputData,
                                                                            outputTensorInfo.GetQuantizationScale(),
                                                                            outputTensorInfo.GetQuantizationOffset()));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ResizeNearestNeighborSqMinTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
            ? armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
            : armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);

    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
            ? armnnUtils::GetTensorInfo(1, 1, 2, 2, dataLayout, ArmnnType)
            : armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(3.141592f);
        inputTensorInfo.SetQuantizationOffset(3);
        outputTensorInfo.SetQuantizationScale(3.141592f);
        outputTensorInfo.SetQuantizationOffset(3);
    }

    std::vector<float> inputData = armnn::IsQuantizedType<T>()
            ? std::initializer_list<float>
              {
                  1, 2, 3, 4,
                  2, 3, 4, 5,
                  3, 4, 5, 6,
                  4, 5, 6, 7
              }
            : std::initializer_list<float>
              {
                  1.0f, 2.0f, 3.0f, 4.0f,
                  2.0f, 3.0f, 4.0f, 5.0f,
                  3.0f, 4.0f, 5.0f, 6.0f,
                  4.0f, 5.0f, 6.0f, 7.0f,

                  7.0f, 6.0f, 5.0f, 4.0f,
                  6.0f, 5.0f, 4.0f, 3.0f,
                  5.0f, 4.0f, 3.0f, 2.0f,
                  4.0f, 3.0f, 2.0f, 1.0f
              };

    std::vector<float> outputData = armnn::IsQuantizedType<T>()
            ? std::initializer_list<float>
              {
                  1, 3,
                  3, 5
              }
            : std::initializer_list<float>
              {
                  1.0f, 3.0f,
                  3.0f, 5.0f,

                  7.0f, 5.0f,
                  5.0f, 3.0f
              };

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;

        std::vector<float> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  armnnUtils::QuantizedVector<T>(inputData,
                                                                 inputTensorInfo.GetQuantizationScale(),
                                                                 inputTensorInfo.GetQuantizationOffset()));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                             armnnUtils::QuantizedVector<T>(outputData,
                                                                            outputTensorInfo.GetQuantizationScale(),
                                                                            outputTensorInfo.GetQuantizationOffset()));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ResizeNearestNeighborMinTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
            ? armnnUtils::GetTensorInfo(1, 1, 2, 3, dataLayout, ArmnnType)
            : armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, ArmnnType);

    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
            ? armnnUtils::GetTensorInfo(1, 1, 1, 2, dataLayout, ArmnnType)
            : armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.5f);
        inputTensorInfo.SetQuantizationOffset(-1);
        outputTensorInfo.SetQuantizationScale(1.5f);
        outputTensorInfo.SetQuantizationOffset(-1);
    }

    std::vector<float> inputData = armnn::IsQuantizedType<T>()
            ? std::initializer_list<float>
              {
                  3.0f, 4.5f, 6.0f, // 1, 2, 3 : expected quantised values
                  9.0f, 13.5f, 21.0f // 5, 8, 13
              }
            : std::initializer_list<float>
              {
                  1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
                  13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
                  144.0f, 233.0f, 377.0f, 610.0f, 987.0f,

                  987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
                  89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
                  8.0f, 5.0f, 3.0f, 2.0f, 1.0f
              };

    std::vector<float> outputData = armnn::IsQuantizedType<T>()
            ? std::initializer_list<float>
              {
                  3.0f, 4.5f // 1, 2 : expected quantised values
              }
            : std::initializer_list<float>
              {
                  1.f, 2.f, 5.f,
                  13.f, 21.f, 55.f,

                  987.f, 610.f, 233.f,
                  89.f, 55.f, 21.f
              };
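
    // With the top-left projection, nearest neighbour effectively samples input index
    // floor(dst * scale): for the float path (3x5 -> 2x3, scaleW = 5/3) the sampled
    // columns are floor(0), floor(1.667), floor(3.333) = 0, 1, 3, yielding 1, 2 and 5
    // from the first input row.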

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;

        std::vector<float> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  armnnUtils::QuantizedVector<T>(inputData,
                                                                 inputTensorInfo.GetQuantizationScale(),
                                                                 inputTensorInfo.GetQuantizationOffset()));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                             armnnUtils::QuantizedVector<T>(outputData,
                                                                            outputTensorInfo.GetQuantizationScale(),
                                                                            outputTensorInfo.GetQuantizationOffset()));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ResizeNearestNeighborMagTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout,
    float inQuantScale,
    int32_t inQuantOffset,
    float outQuantScale,
    int32_t outQuantOffset)
{
    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
            ? armnnUtils::GetTensorInfo(1, 1, 3, 2, dataLayout, ArmnnType)
            : armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, ArmnnType);

    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
            ? armnnUtils::GetTensorInfo(1, 1, 3, 5, dataLayout, ArmnnType)
            : armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(inQuantScale);
        inputTensorInfo.SetQuantizationOffset(inQuantOffset);
        outputTensorInfo.SetQuantizationScale(outQuantScale);
        outputTensorInfo.SetQuantizationOffset(outQuantOffset);
    }

    std::vector<float> inputData = armnn::IsQuantizedType<T>()
            ? std::initializer_list<float>
              {
                  0.183005f, 2.379065f, // 24, 228 : expected quantised values
                  1.054970f, 1.302565f, // 105, 128,
                  2.400595f, 0.688960f // 230, 71
              }
            : std::initializer_list<float>
              {
                  1.0f, 2.0f,
                  13.0f, 21.0f,
                  144.0f, 233.0f,

                  233.0f, 144.0f,
                  21.0f, 13.0f,
                  2.0f, 1.0f
              };

    std::vector<float> outputData = armnn::IsQuantizedType<T>()
            ? std::initializer_list<float>
              {
                  0.183005f, 0.183005f, 0.183005f, 2.379065f, 2.379065f,
                  1.054970f, 1.054970f, 1.054970f, 1.302565f, 1.302565f,
                  2.400595f, 2.400595f, 2.400595f, 0.688960f, 0.688960f
              }
            : std::initializer_list<float>
              {
                  1.f, 1.f, 1.f, 2.f, 2.f,
                  13.f, 13.f, 13.f, 21.f, 21.f,
                  144.f, 144.f, 144.f, 233.f, 233.f,

                  233.f, 233.f, 233.f, 144.f, 144.f,
                  21.f, 21.f, 21.f, 13.f, 13.f,
                  2.f, 2.f, 2.f, 1.f, 1.f
              };
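
    // For magnification (width 2 -> 5, scaleW = 0.4) the sampled columns are
    // floor(0), floor(0.4), floor(0.8), floor(1.2), floor(1.6) = 0, 0, 0, 1, 1,
    // so each input texel is simply replicated across the output, e.g. 1, 1, 1, 2, 2.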

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;

        std::vector<float> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  armnnUtils::QuantizedVector<T>(inputData,
                                                                 inputTensorInfo.GetQuantizationScale(),
                                                                 inputTensorInfo.GetQuantizationOffset()));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                             armnnUtils::QuantizedVector<T>(outputData,
                                                                            outputTensorInfo.GetQuantizationScale(),
                                                                            outputTensorInfo.GetQuantizationOffset()));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}