//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "AdditionTestImpl.hpp"

#include "ElementwiseTestImpl.hpp"

#include <QuantizeHelper.hpp>

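// Explicit specialization of CreateWorkload for AdditionQueueDescriptor, used by
// the shared ElementwiseTestHelper to build an Addition workload.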
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::AdditionQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::AdditionQueueDescriptor& descriptor)
{
    return workloadFactory.CreateAddition(descriptor, info);
}

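// Element-wise addition of two Float32 tensors of identical shape [2, 2, 2, 3].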
LayerTestResult<float, 4> AdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int batchSize = 2u;
    unsigned int channels  = 2u;
    unsigned int height    = 2u;
    unsigned int width     = 3u;

    unsigned int shape[] = { batchSize, channels, height, width };

    std::vector<float> input1 =
    {
        0.0f, 2.0f, 1.0f,
        0.2f, 1.0f, 2.0f,

        1.0f, 2.0f, 1.0f,
        0.2f, 1.0f, 2.0f,

        0.0f, 2.0f, 1.0f,
        4.2f, 1.0f, 2.0f,

        0.0f, 0.0f, 1.0f,
        0.2f, 1.0f, 2.0f,
    };

    std::vector<float> input2 =
    {
        1.0f, 2.0f, 1.0f,
        0.0f, 1.0f, 2.0f,

        1.0f, 2.0f, -2.0f,
        0.2f, 1.0f, 2.0f,

        0.0f, 2.0f, 1.0f,
        4.2f, 0.0f, -3.0f,

        0.0f, 0.0f, 1.0f,
        0.7f, 1.0f, 5.0f,
    };

    std::vector<float> output =
    {
        1.0f, 4.0f, 2.0f,
        0.2f, 2.0f, 4.0f,

        2.0f, 4.0f, -1.0f,
        0.4f, 2.0f, 4.0f,

        0.0f, 4.0f, 2.0f,
        8.4f, 1.0f, -1.0f,

        0.0f, 0.0f, 2.0f,
        0.9f, 2.0f, 7.0f,
    };

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        shape,
        input1,
        shape,
        input2,
        shape,
        output);
}

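// Element-wise addition of two Float32 tensors of identical shape [2, 2, 2, 2, 3],
// exercising the 5-dimensional tensor path.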
LayerTestResult<float, 5> Addition5dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int depth     = 2u;
    unsigned int batchSize = 2u;
    unsigned int channels  = 2u;
    unsigned int height    = 2u;
    unsigned int width     = 3u;

    unsigned int shape[] = { depth, batchSize, channels, height, width };

    std::vector<float> input1 =
    {
        2.6f, 4.0f, 4.4f, 2.7f, 4.6f, 2.8f,
        2.3f, 1.9f, 3.4f, 2.9f, 2.2f, 4.5f,

        2.8f, 1.9f, 2.3f, 2.6f, 4.7f, 3.5f,
        0.4f, 1.5f, 2.1f, 0.7f, 5.0f, 1.1f,


        1.0f, 2.7f, 0.0f, 0.6f, 0.8f, 0.9f,
        1.0f, 2.6f, 0.4f, 3.8f, 0.4f, 0.8f,

        0.5f, 4.3f, 3.1f, 4.4f, 0.7f, 1.4f,
        0.4f, 4.4f, 0.7f, 0.6f, 4.7f, 1.2f,
    };

    std::vector<float> input2 =
    {
        4.4f, 3.0f, 1.0f, 0.0f, 3.9f, 3.1f,
        1.7f, 2.9f, 1.3f, 0.4f, 0.4f, 4.3f,

        4.5f, 0.2f, 2.2f, 4.1f, 3.9f, 3.0f,
        0.1f, 2.5f, 4.1f, 4.6f, 1.5f, 0.0f,


        0.5f, 4.9f, 2.5f, 1.5f, 3.4f, 4.5f,
        2.0f, 3.0f, 4.9f, 1.6f, 2.4f, 3.4f,

        3.6f, 1.8f, 1.3f, 2.6f, 2.1f, 4.8f,
        2.0f, 4.3f, 4.0f, 0.2f, 0.6f, 4.4f,
    };

    std::vector<float> output =
    {
        7.0f, 7.0f, 5.4f, 2.7f, 8.5f, 5.9f,
        4.0f, 4.8f, 4.7f, 3.3f, 2.6f, 8.8f,

        7.3f, 2.1f, 4.5f, 6.7f, 8.6f, 6.5f,
        0.5f, 4.0f, 6.2f, 5.3f, 6.5f, 1.1f,


        1.5f, 7.6f, 2.5f, 2.1f, 4.2f, 5.4f,
        3.0f, 5.6f, 5.3f, 5.4f, 2.8f, 4.2f,

        4.1f, 6.1f, 4.4f, 7.0f, 2.8f, 6.2f,
        2.4f, 8.7f, 4.7f, 0.8f, 5.3f, 5.6f,
    };

    return ElementwiseTestHelper<5, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        shape,
        input1,
        shape,
        input2,
        shape,
        output);
}

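// Adds a [1, 3, 2, 1] tensor to a [1, 1, 2, 3] tensor, broadcasting both inputs
// to the [1, 3, 2, 3] output shape.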
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        },
        qScale, qOffset));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        },
        qScale, qOffset));

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        },
        qScale, qOffset));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

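// Broadcasts a single-element [1, 1, 1, 1] tensor across a [1, 3, 2, 3] tensor.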
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(
        {
             0.0f,  1.0f,  2.0f,
             3.0f,  4.0f,  5.0f,
             6.0f,  7.0f,  8.0f,
             9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        },
        qScale, qOffset));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(
        {
            0.5f,
        },
        qScale, qOffset));

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(
        {
             0.5f,  1.5f,  2.5f,
             3.5f,  4.5f,  5.5f,
             6.5f,  7.5f,  8.5f,
             9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        },
        qScale, qOffset));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

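// Typed entry points for the broadcast tests: Float32, QuantisedAsymm8 and QuantisedSymm16.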
LayerTestResult<float, 4> AdditionBroadcastTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 2.f, 0);
}

LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager, 2.f, 0);
}

LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.1333333f, 128);
}

LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager, 0.1333333f, 0);
}

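// Quantized (QAsymm8) addition: inputs and output share scale 7.0 and offset 3.
// The trailing comments give the dequantized values; sums beyond the representable
// range saturate to the quantized maximum of 255.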
LayerTestResult<uint8_t, 4> AdditionUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<uint8_t> input0 =
    {
        63, 35, 77, 70, 56, 112,  //  420, 224,  518,  469,  371, 763
        203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
    };

    std::vector<uint8_t> input1 =
    {
        21, 7, 175, 231, 175, 210, // 126,   28, 1204, 1596, 1204, 1449
        126, 161, 63, 21, 105, 126 // 861, 1106,  420,  126,  714,  861
    };

    std::vector<uint8_t> output =
    {
        81, 39, 249, 255, 228, 255,  //  546,          252,  1722, 2065(clamped), 1575, 2212(clamped)
        255, 186, 255, 186, 255, 214 // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
    };

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        7.0f,
        3,
        shape1,
        input1,
        7.0f,
        3,
        shape0,
        output,
        7.0f,
        3);
}

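// Quantized (QSymm16) addition with scale 7.0 and offset 0. The trailing comments
// give the dequantized values; no sum here reaches the QSymm16 saturation point.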
LayerTestResult<int16_t, 4> AdditionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int16_t> input0 =
    {
        63, 35, 77, 70, 56, 112,  //  441, 245,  539,  490,  392,  784
        203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715,  637
    };

    std::vector<int16_t> input1 =
    {
        21, 7, 175, 231, 175, 210, // 147,   49, 1225, 1617, 1225, 1470
        126, 161, 63, 21, 105, 126 // 882, 1127,  441,  147,  735,  882
    };

    std::vector<int16_t> output =
    {
        84, 42, 252, 301, 231, 322,  //  588,  294, 1764, 2107, 1617, 2254
        329, 189, 315, 189, 350, 217 // 2303, 1323, 2205, 1323, 2450, 1519
    };

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        7.0f,
        0,
        shape1,
        input1,
        7.0f,
        0,
        shape0,
        output,
        7.0f,
        0);
}

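// Runs a 1x1 max pooling (stride 2) over a 3x3 tensor, then adds a second tensor
// to the pooled result, chaining two workloads through a shared tensor handle.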
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Create the initial tensor:
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3 }, armnn::DataType::Float32);
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);

    boost::multi_array<float, 4> poolingInput = MakeTensor<float, 4>(poolingInputTensorInfo,
        {
            1, 2, 3,
            4, 5, 6,
            7, 8, 9
        });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply a max pool with poolSize = 1x1 and stride = 2x2, giving:
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the max pooling workload.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    auto shape(GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);

    // Create an addition with another tensor of the same size. This is the result
    // of applying a Conv2d with a kernel of ones(2) and stride 1x1 to the initial tensor:
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);
    armnn::TensorInfo addOutputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);

    boost::multi_array<float, 4> addInput = MakeTensor<float, 4>(addInputTensorInfo,
        {
            12, 16,
            24, 28
        });

    // Expected output tensor after the max pooling and the addition.
    LayerTestResult<float, 4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
        {
            13, 19,
            31, 37
        }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the max pooling and the new tensor.
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    // Execute the max pooling first so that its result is available before being read back.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());
    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);

    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}

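// Compares the addition result from the given workload factory against a
// reference factory on identical random inputs.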
LayerTestResult<float, 4> CompareAdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    unsigned int batchSize = 4;
    unsigned int channels  = 1;
    unsigned int height    = 2;
    unsigned int width     = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = { batchSize, channels, height, width };

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}