//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "AdditionTestImpl.hpp"

#include "ElementwiseTestImpl.hpp"

#include <QuantizeHelper.hpp>

// Specialization of the CreateWorkload factory hook (declared in
// ElementwiseTestImpl.hpp) that maps AdditionQueueDescriptor onto the
// backend factory's CreateAddition() entry point. ElementwiseTestHelper
// calls this to build the workload under test.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::AdditionQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::AdditionQueueDescriptor& descriptor)
{
    return workloadFactory.CreateAddition(descriptor, info);
}
20
21LayerTestResult<float,4> AdditionTest(
22 armnn::IWorkloadFactory& workloadFactory,
23 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
24{
25 unsigned int batchSize = 2u;
26 unsigned int channels = 2u;
27 unsigned int height = 2u;
28 unsigned int width = 3u;
29
30 unsigned int shape[] = { batchSize, channels, height, width };
31
32 std::vector<float> input1 =
33 {
34 0.0f, 2.0f, 1.0f,
35 0.2f, 1.0f, 2.0f,
36
37 1.0f, 2.0f, 1.0f,
38 0.2f, 1.0f, 2.0f,
39
40 0.0f, 2.0f, 1.0f,
41 4.2f, 1.0f, 2.0f,
42
43 0.0f, 0.0f, 1.0f,
44 0.2f, 1.0f, 2.0f,
45 };
46
47 std::vector<float> input2 =
48 {
49 1.0f, 2.0f, 1.0f,
50 0.0f, 1.0f, 2.0f,
51
52 1.0f, 2.0f, -2.0f,
53 0.2f, 1.0f, 2.0f,
54
55 0.0f, 2.0f, 1.0f,
56 4.2f, 0.0f, -3.0f,
57
58 0.0f, 0.0f, 1.0f,
59 0.7f, 1.0f, 5.0f,
60 };
61
62
63 std::vector<float> output
64 {
65 1.0f, 4.0f, 2.0f,
66 0.2f, 2.0f, 4.0f,
67
68 2.0f, 4.0f, -1.0f,
69 0.4f, 2.0f, 4.0f,
70
71 0.0f, 4.0f, 2.0f,
72 8.4f, 1.0f, -1.0f,
73
74 0.0f, 0.0f, 2.0f,
75 0.9f, 2.0f, 7.0f,
76 };
77
78 return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
79 workloadFactory,
80 memoryManager,
81 shape,
82 input1,
83 shape,
84 input2,
85 shape,
86 output);
87}
88
89LayerTestResult<float, 5> Addition5dTest(
90 armnn::IWorkloadFactory& workloadFactory,
91 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
92{
93 unsigned int depth = 2u;
94 unsigned int batchSize = 2u;
95 unsigned int channels = 2u;
96 unsigned int height = 2u;
97 unsigned int width = 3u;
98
99 unsigned int shape[] = { depth, batchSize, channels, height, width };
100
101 std::vector<float> input1 =
102 {
103 2.6f, 4.0f, 4.4f, 2.7f, 4.6f, 2.8f,
104 2.3f, 1.9f, 3.4f, 2.9f, 2.2f, 4.5f,
105
106 2.8f, 1.9f, 2.3f, 2.6f, 4.7f, 3.5f,
107 0.4f, 1.5f, 2.1f, 0.7f, 5.0f, 1.1f,
108
109
110 1.0f, 2.7f, 0.0f, 0.6f, 0.8f, 0.9f,
111 1.0f, 2.6f, 0.4f, 3.8f, 0.4f, 0.8f,
112
113 0.5f, 4.3f, 3.1f, 4.4f, 0.7f, 1.4f,
114 0.4f, 4.4f, 0.7f, 0.6f, 4.7f, 1.2f,
115
116 };
117
118 std::vector<float> input2 =
119 {
120 4.4f, 3.0f, 1.0f, 0.0f, 3.9f, 3.1f,
121 1.7f, 2.9f, 1.3f, 0.4f, 0.4f, 4.3f,
122
123 4.5f, 0.2f, 2.2f, 4.1f, 3.9f, 3.0f,
124 0.1f, 2.5f, 4.1f, 4.6f, 1.5f, 0.0f,
125
126
127 0.5f, 4.9f, 2.5f, 1.5f, 3.4f, 4.5f,
128 2.0f, 3.0f, 4.9f, 1.6f, 2.4f, 3.4f,
129
130 3.6f, 1.8f, 1.3f, 2.6f, 2.1f, 4.8f,
131 2.0f, 4.3f, 4.0f, 0.2f, 0.6f, 4.4f,
132 };
133
134 std::vector<float> output =
135 {
136 7.0f, 7.0f, 5.4f, 2.7f, 8.5f, 5.9f,
137 4.0f, 4.8f, 4.7f, 3.3f, 2.6f, 8.8f,
138
139 7.3f, 2.1f, 4.5f, 6.7f, 8.6f, 6.5f,
140 0.5f, 4.0f, 6.2f, 5.3f, 6.5f, 1.1f,
141
142
143 1.5f, 7.6f, 2.5f, 2.1f, 4.2f, 5.4f,
144 3.0f, 5.6f, 5.3f, 5.4f, 2.8f, 4.2f,
145
146 4.1f, 6.1f, 4.4f, 7.0f, 2.8f, 6.2f,
147 2.4f, 8.7f, 4.7f, 0.8f, 5.3f, 5.6f,
148 };
149
150 return ElementwiseTestHelper<5, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
151 workloadFactory,
152 memoryManager,
153 shape,
154 input1,
155 shape,
156 input2,
157 shape,
158 output);
159}
160
// Shared implementation for the broadcast-addition tests: adds a [1,3,2,1]
// tensor to a [1,1,2,3] tensor, broadcasting both to the [1,3,2,3] output.
// qScale/qOffset are applied to all three TensorInfos only when T is a
// quantized type; for float types they are ignored.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    IgnoreUnused(memoryManager);
    // Input shapes differ in dims 1 and 3, exercising broadcast in both.
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        },
        qScale, qOffset));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        },
        qScale, qOffset));

    // Expected result: input1 broadcast across width, input2 broadcast
    // across channels, summed element-wise.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        },
        qScale, qOffset));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Build the addition workload with two inputs and one output.
    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Handles must be allocated before any data is copied into them.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
243
// Shared implementation for the single-element broadcast tests: adds a
// scalar-shaped [1,1,1,1] tensor (value 0.5) to every element of a
// [1,3,2,3] tensor. qScale/qOffset are applied to all three TensorInfos
// only when T is a quantized type.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    IgnoreUnused(memoryManager);
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(
        {
             0.0f,  1.0f,  2.0f,
             3.0f,  4.0f,  5.0f,
             6.0f,  7.0f,  8.0f,
             9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        },
        qScale, qOffset));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(
        {
            0.5f,
        },
        qScale, qOffset));

    // Expected result: every element of input1 incremented by 0.5.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(
        {
             0.5f,  1.5f,  2.5f,
             3.5f,  4.5f,  5.5f,
             6.5f,  7.5f,  8.5f,
             9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        },
        qScale, qOffset));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Build the addition workload with two inputs and one output.
    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Handles must be allocated before any data is copied into them.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
321
322LayerTestResult<float, 4> AdditionBroadcastTest(
323 armnn::IWorkloadFactory& workloadFactory,
324 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
325{
326 return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
327 workloadFactory, memoryManager, 0.0f, 0);
328}
329
330LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
331 armnn::IWorkloadFactory& workloadFactory,
332 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
333{
Derek Lambertif90c56d2020-01-10 17:14:08 +0000334 return AdditionBroadcastTestImpl<armnn::DataType::QAsymmU8>(
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100335 workloadFactory, memoryManager, 2.f, 0);
336}
337
338LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
339 armnn::IWorkloadFactory& workloadFactory,
340 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
341{
Derek Lambertif90c56d2020-01-10 17:14:08 +0000342 return AdditionBroadcastTestImpl<armnn::DataType::QSymmS16>(
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100343 workloadFactory, memoryManager, 2.f, 0);
344}
345
Teresa Charlinecb6b8e2020-05-22 18:08:23 +0100346LayerTestResult<int32_t, 4> AdditionBroadcastInt32Test(
347 armnn::IWorkloadFactory& workloadFactory,
348 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
349{
350 return AdditionBroadcastTestImpl<armnn::DataType::Signed32>(
351 workloadFactory, memoryManager, 1.f, 0);
352}
353
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100354LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
355 armnn::IWorkloadFactory& workloadFactory,
356 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
357{
358 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
359 workloadFactory, memoryManager, 0.0f, 0);
360}
361
362LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
363 armnn::IWorkloadFactory& workloadFactory,
364 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
365{
Derek Lambertif90c56d2020-01-10 17:14:08 +0000366 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QAsymmU8>(
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100367 workloadFactory, memoryManager, 0.1333333f, 128);
368}
369
370LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
371 armnn::IWorkloadFactory& workloadFactory,
372 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
373{
Derek Lambertif90c56d2020-01-10 17:14:08 +0000374 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QSymmS16>(
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100375 workloadFactory, memoryManager, 0.1333333f, 0);
376}
377
Teresa Charlinecb6b8e2020-05-22 18:08:23 +0100378LayerTestResult<int32_t, 4> AdditionBroadcast1ElementInt32Test(
379 armnn::IWorkloadFactory& workloadFactory,
380 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
381{
382 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Signed32>(
383 workloadFactory, memoryManager, 1.f, 0);
384}
385
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100386LayerTestResult<uint8_t, 4> AdditionUint8Test(
387 armnn::IWorkloadFactory& workloadFactory,
388 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
389{
390 const unsigned int shape0[] = { 1, 2, 2, 3 };
391 const unsigned int shape1[] = { 1, 2, 2, 3 };
392
393 std::vector<uint8_t> input0(
394 {
395 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
396 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
397 });
398
399 std::vector<uint8_t> input1(
400 {
401 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
402 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
403 });
404
405 std::vector<uint8_t> output(
406 {
407 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
408 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
409 });
410
Derek Lambertif90c56d2020-01-10 17:14:08 +0000411 return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QAsymmU8>(
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100412 workloadFactory,
413 memoryManager,
414 shape0,
415 input0,
416 7.0f,
417 3,
418 shape1,
419 input1,
420 7.0f,
421 3,
422 shape0,
423 output,
424 7.0f,
425 3);
426}
427
// Quantized (QSymmS16) addition: scale 7.0, offset 0, so real value = q * 7.
// Unlike the uint8 test above, none of these sums saturate the int16 range.
LayerTestResult<int16_t, 4> AdditionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int16_t> input0 =
    {
        63, 35, 77, 70, 56, 112, // dequantized: 441, 245, 539, 490, 392, 784
        203, 28, 252, 168, 245, 91 // dequantized: 1421, 196, 1764, 1176, 1715, 637
    };

    std::vector<int16_t> input1 =
    {
        21, 7, 175, 231, 175, 210, // dequantized: 126, 28, 1204, 1596, 1204, 1449
        126, 161, 63, 21, 105, 126 // dequantized: 861, 1106, 420, 126, 714, 861
    };

    std::vector<int16_t> output =
    {
        84, 42, 252, 301, 231, 322, // dequantized: 588, 294, 1764, 2107, 1617, 2254
        329, 189, 315, 189, 350, 217, // dequantized: 2303, 1323, 2205, 1323, 2450, 1519
    };

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        7.0f,
        0,
        shape1,
        input1,
        7.0f,
        0,
        shape0,
        output,
        7.0f,
        0);
}
469
Teresa Charlinecb6b8e2020-05-22 18:08:23 +0100470LayerTestResult<int32_t, 4> AdditionInt32Test(
471 armnn::IWorkloadFactory& workloadFactory,
472 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
473{
474 const unsigned int shape0[] = { 1, 2, 2, 3 };
475 const unsigned int shape1[] = { 1, 2, 2, 3 };
476
477 std::vector<int32_t> input0 =
478 {
479 63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 184
480 203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
481 };
482
483 std::vector<int32_t> input1 =
484 {
485 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
486 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
487 };
488
489 std::vector<int32_t> output =
490 {
491 84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107(clamped), 1617, 2254(clamped)
492 329, 189, 315, 189, 350, 217, // 2303(clamped), 1323, 2205(clamped), 1323, 2450(clamped), 1519
493 };
494
495 return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Signed32>(
496 workloadFactory,
497 memoryManager,
498 shape0,
499 input0,
500 1.0f,
501 0,
502 shape1,
503 input1,
504 1.0f,
505 0,
506 shape0,
507 output,
508 1.0f,
509 0);
510}
511
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100512LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
513 armnn::IWorkloadFactory& workloadFactory,
514 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
515{
Jan Eilers8eb25602020-03-09 12:13:48 +0000516 IgnoreUnused(memoryManager);
Derek Lambertic374ff02019-12-10 21:57:35 +0000517
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100518 // Create Initial Tensor
519 // 1, 2, 3
520 // 4, 5, 6
521 // 7, 8, 9
522
523 armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
524 armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);
525
526 boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
527 {1, 2, 3,
528 4, 5, 6,
529 7, 8, 9
530 });
531
532 std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
533 workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
534 std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
535 workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);
536
537 // Apply MaxPool poolSize = 1x1, stride=2x2
538 // Result =
539 // 1, 3
540 // 7, 9
541 armnn::Pooling2dDescriptor descriptor;
542 descriptor.m_PoolHeight = 1;
543 descriptor.m_PoolWidth = 1;
544 descriptor.m_StrideX = 2;
545 descriptor.m_StrideY = 2;
546 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
547
548 armnn::Pooling2dQueueDescriptor queueDescriptor;
549 queueDescriptor.m_Parameters = descriptor;
550 armnn::WorkloadInfo workloadInfo;
551 AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
552 AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
553
554 // Create the MaxPool
555 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
556
557 //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
558 auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
559 boost::multi_array<float, 4> resultMaxPool;
560 resultMaxPool.resize(shape);
561
562
563 // Create addition with another tensor the same size
564 // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
565 // with the initial tensor.
566 // 12, 16
567 // 24, 28
568
569 armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
570 armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
571
572 boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
573 {12, 16,
574 24, 28,
575 });
576
577 // Expected output tensor after MaxPool and Addition.
578 LayerTestResult<float,4> addRet(addOutputTensorInfo);
579 addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
580 {
581 13, 19,
582 31, 37
583 }));
584
585 std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
586 std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);
587
588 armnn::AdditionQueueDescriptor data;
589 armnn::WorkloadInfo info;
590
591 // Add the output of the MaxPool and the new tensor
592 AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
593 AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
594 AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());
595
596 std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);
597
598 poolingInputHandle->Allocate();
599 poolingOutputHandle->Allocate();
600 addInputHandle->Allocate();
601 addOutputHandle->Allocate();
602
603 CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
604 CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());
605
606 CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
607 CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);
608
609 workload->PostAllocationConfigure();
610 workload->Execute();
611 addWorkload->PostAllocationConfigure();
612 addWorkload->Execute();
613
614 CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());
615
616 return addRet;
617}
618
// Runs the same addition on the backend under test and on a reference
// factory with identical random inputs; the reference result is stored as
// outputExpected so the caller can compare the two.
LayerTestResult<float,4> CompareAdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    IgnoreUnused(memoryManager);
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    // Fixed seeds (1232, 456) keep the "random" inputs reproducible.
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float,4> ret(outputTensorInfo);

    // Handles for the backend under test...
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // ...and a parallel set for the reference backend.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // Clone the descriptor for the reference run, rebinding only the handles.
    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive identical input data.
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    // Actual result from the backend under test, expected from the reference.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}