blob: 0e1b7336de79008f3586d65f905bfd0117a43f86 [file] [log] [blame]
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +01001//
Teresa Charlinfbf0e5b2020-08-17 01:01:06 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +01003// SPDX-License-Identifier: MIT
4//
5
6#include "AdditionTestImpl.hpp"
7
8#include "ElementwiseTestImpl.hpp"
9
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010010#include <QuantizeHelper.hpp>
Keith Davis33a626f2020-08-27 15:38:12 +010011#include <reference/test/RefWorkloadFactoryHelper.hpp>
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010012
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +010013template<>
14std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::AdditionQueueDescriptor>(
15 const armnn::IWorkloadFactory& workloadFactory,
16 const armnn::WorkloadInfo& info,
17 const armnn::AdditionQueueDescriptor& descriptor)
18{
19 return workloadFactory.CreateAddition(descriptor, info);
20}
21
22LayerTestResult<float,4> AdditionTest(
23 armnn::IWorkloadFactory& workloadFactory,
Keith Davis33a626f2020-08-27 15:38:12 +010024 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
25 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +010026{
27 unsigned int batchSize = 2u;
28 unsigned int channels = 2u;
29 unsigned int height = 2u;
30 unsigned int width = 3u;
31
32 unsigned int shape[] = { batchSize, channels, height, width };
33
34 std::vector<float> input1 =
35 {
36 0.0f, 2.0f, 1.0f,
37 0.2f, 1.0f, 2.0f,
38
39 1.0f, 2.0f, 1.0f,
40 0.2f, 1.0f, 2.0f,
41
42 0.0f, 2.0f, 1.0f,
43 4.2f, 1.0f, 2.0f,
44
45 0.0f, 0.0f, 1.0f,
46 0.2f, 1.0f, 2.0f,
47 };
48
49 std::vector<float> input2 =
50 {
51 1.0f, 2.0f, 1.0f,
52 0.0f, 1.0f, 2.0f,
53
54 1.0f, 2.0f, -2.0f,
55 0.2f, 1.0f, 2.0f,
56
57 0.0f, 2.0f, 1.0f,
58 4.2f, 0.0f, -3.0f,
59
60 0.0f, 0.0f, 1.0f,
61 0.7f, 1.0f, 5.0f,
62 };
63
64
65 std::vector<float> output
66 {
67 1.0f, 4.0f, 2.0f,
68 0.2f, 2.0f, 4.0f,
69
70 2.0f, 4.0f, -1.0f,
71 0.4f, 2.0f, 4.0f,
72
73 0.0f, 4.0f, 2.0f,
74 8.4f, 1.0f, -1.0f,
75
76 0.0f, 0.0f, 2.0f,
77 0.9f, 2.0f, 7.0f,
78 };
79
80 return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
81 workloadFactory,
82 memoryManager,
83 shape,
84 input1,
85 shape,
86 input2,
87 shape,
Keith Davis33a626f2020-08-27 15:38:12 +010088 output,
89 tensorHandleFactory);
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +010090}
91
92LayerTestResult<float, 5> Addition5dTest(
93 armnn::IWorkloadFactory& workloadFactory,
Keith Davis33a626f2020-08-27 15:38:12 +010094 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
95 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +010096{
97 unsigned int depth = 2u;
98 unsigned int batchSize = 2u;
99 unsigned int channels = 2u;
100 unsigned int height = 2u;
101 unsigned int width = 3u;
102
103 unsigned int shape[] = { depth, batchSize, channels, height, width };
104
105 std::vector<float> input1 =
106 {
107 2.6f, 4.0f, 4.4f, 2.7f, 4.6f, 2.8f,
108 2.3f, 1.9f, 3.4f, 2.9f, 2.2f, 4.5f,
109
110 2.8f, 1.9f, 2.3f, 2.6f, 4.7f, 3.5f,
111 0.4f, 1.5f, 2.1f, 0.7f, 5.0f, 1.1f,
112
113
114 1.0f, 2.7f, 0.0f, 0.6f, 0.8f, 0.9f,
115 1.0f, 2.6f, 0.4f, 3.8f, 0.4f, 0.8f,
116
117 0.5f, 4.3f, 3.1f, 4.4f, 0.7f, 1.4f,
118 0.4f, 4.4f, 0.7f, 0.6f, 4.7f, 1.2f,
119
120 };
121
122 std::vector<float> input2 =
123 {
124 4.4f, 3.0f, 1.0f, 0.0f, 3.9f, 3.1f,
125 1.7f, 2.9f, 1.3f, 0.4f, 0.4f, 4.3f,
126
127 4.5f, 0.2f, 2.2f, 4.1f, 3.9f, 3.0f,
128 0.1f, 2.5f, 4.1f, 4.6f, 1.5f, 0.0f,
129
130
131 0.5f, 4.9f, 2.5f, 1.5f, 3.4f, 4.5f,
132 2.0f, 3.0f, 4.9f, 1.6f, 2.4f, 3.4f,
133
134 3.6f, 1.8f, 1.3f, 2.6f, 2.1f, 4.8f,
135 2.0f, 4.3f, 4.0f, 0.2f, 0.6f, 4.4f,
136 };
137
138 std::vector<float> output =
139 {
140 7.0f, 7.0f, 5.4f, 2.7f, 8.5f, 5.9f,
141 4.0f, 4.8f, 4.7f, 3.3f, 2.6f, 8.8f,
142
143 7.3f, 2.1f, 4.5f, 6.7f, 8.6f, 6.5f,
144 0.5f, 4.0f, 6.2f, 5.3f, 6.5f, 1.1f,
145
146
147 1.5f, 7.6f, 2.5f, 2.1f, 4.2f, 5.4f,
148 3.0f, 5.6f, 5.3f, 5.4f, 2.8f, 4.2f,
149
150 4.1f, 6.1f, 4.4f, 7.0f, 2.8f, 6.2f,
151 2.4f, 8.7f, 4.7f, 0.8f, 5.3f, 5.6f,
152 };
153
154 return ElementwiseTestHelper<5, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
155 workloadFactory,
156 memoryManager,
157 shape,
158 input1,
159 shape,
160 input2,
161 shape,
Keith Davis33a626f2020-08-27 15:38:12 +0100162 output,
163 tensorHandleFactory);
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100164}
165
// Exercises addition with shape broadcasting: a [1,3,2,1] tensor is added to a
// [1,1,2,3] tensor, producing a [1,3,2,3] output. For quantized data types the
// same qScale/qOffset is applied to both inputs and the output.
// Note: memoryManager is intentionally unused; handles come from tensorHandleFactory.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    IgnoreUnused(memoryManager);
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    // Only quantized types carry scale/offset; float tensors ignore them.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        },
        qScale, qOffset));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        },
        qScale, qOffset));

    // Expected output: input1 broadcast along W, input2 broadcast along C,
    // then summed element-wise.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        },
        qScale, qOffset));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // Wire the tensors into the workload descriptor.
    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Allocate backing memory before copying input data in.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    // Read the computed result back for comparison against outputExpected.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
249
// Exercises addition where the second operand is a single scalar-like [1,1,1,1]
// tensor broadcast across the whole [1,3,2,3] first operand (adds 0.5 to every
// element). Quantization scale/offset is shared by inputs and output.
// Note: memoryManager is intentionally unused; handles come from tensorHandleFactory.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    IgnoreUnused(memoryManager);
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    // Only quantized types carry scale/offset; float tensors ignore them.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(
        {
             0.0f,  1.0f,  2.0f,
             3.0f,  4.0f,  5.0f,
             6.0f,  7.0f,  8.0f,
             9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        },
        qScale, qOffset));

    // Single element to be broadcast over every position of input1.
    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(
        {
            0.5f,
        },
        qScale, qOffset));

    // Expected output: every element of input1 plus 0.5.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(
        {
             0.5f,  1.5f,  2.5f,
             3.5f,  4.5f,  5.5f,
             6.5f,  7.5f,  8.5f,
             9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        },
        qScale, qOffset));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // Wire the tensors into the workload descriptor.
    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Allocate backing memory before copying input data in.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    // Read the computed result back for comparison against outputExpected.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
328
329LayerTestResult<float, 4> AdditionBroadcastTest(
330 armnn::IWorkloadFactory& workloadFactory,
Keith Davis33a626f2020-08-27 15:38:12 +0100331 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
332 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100333{
334 return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
Keith Davis33a626f2020-08-27 15:38:12 +0100335 workloadFactory, memoryManager, 0.0f, 0, tensorHandleFactory);
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100336}
337
338LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
339 armnn::IWorkloadFactory& workloadFactory,
Keith Davis33a626f2020-08-27 15:38:12 +0100340 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
341 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100342{
Derek Lambertif90c56d2020-01-10 17:14:08 +0000343 return AdditionBroadcastTestImpl<armnn::DataType::QAsymmU8>(
Keith Davis33a626f2020-08-27 15:38:12 +0100344 workloadFactory, memoryManager, 2.f, 0, tensorHandleFactory);
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100345}
346
347LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
348 armnn::IWorkloadFactory& workloadFactory,
Keith Davis33a626f2020-08-27 15:38:12 +0100349 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
350 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100351{
Derek Lambertif90c56d2020-01-10 17:14:08 +0000352 return AdditionBroadcastTestImpl<armnn::DataType::QSymmS16>(
Keith Davis33a626f2020-08-27 15:38:12 +0100353 workloadFactory, memoryManager, 2.f, 0, tensorHandleFactory);
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100354}
355
Teresa Charlinecb6b8e2020-05-22 18:08:23 +0100356LayerTestResult<int32_t, 4> AdditionBroadcastInt32Test(
357 armnn::IWorkloadFactory& workloadFactory,
Keith Davis33a626f2020-08-27 15:38:12 +0100358 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
359 const armnn::ITensorHandleFactory& tensorHandleFactory)
Teresa Charlinecb6b8e2020-05-22 18:08:23 +0100360{
361 return AdditionBroadcastTestImpl<armnn::DataType::Signed32>(
Keith Davis33a626f2020-08-27 15:38:12 +0100362 workloadFactory, memoryManager, 1.f, 0, tensorHandleFactory);
Teresa Charlinecb6b8e2020-05-22 18:08:23 +0100363}
364
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100365LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
366 armnn::IWorkloadFactory& workloadFactory,
Keith Davis33a626f2020-08-27 15:38:12 +0100367 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
368 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100369{
370 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
Keith Davis33a626f2020-08-27 15:38:12 +0100371 workloadFactory, memoryManager, 0.0f, 0, tensorHandleFactory);
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100372}
373
374LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
375 armnn::IWorkloadFactory& workloadFactory,
Keith Davis33a626f2020-08-27 15:38:12 +0100376 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
377 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100378{
Derek Lambertif90c56d2020-01-10 17:14:08 +0000379 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QAsymmU8>(
Keith Davis33a626f2020-08-27 15:38:12 +0100380 workloadFactory, memoryManager, 0.1333333f, 128, tensorHandleFactory);
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100381}
382
383LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
384 armnn::IWorkloadFactory& workloadFactory,
Keith Davis33a626f2020-08-27 15:38:12 +0100385 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
386 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100387{
Derek Lambertif90c56d2020-01-10 17:14:08 +0000388 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QSymmS16>(
Keith Davis33a626f2020-08-27 15:38:12 +0100389 workloadFactory, memoryManager, 0.1333333f, 0, tensorHandleFactory);
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100390}
391
Teresa Charlinecb6b8e2020-05-22 18:08:23 +0100392LayerTestResult<int32_t, 4> AdditionBroadcast1ElementInt32Test(
393 armnn::IWorkloadFactory& workloadFactory,
Keith Davis33a626f2020-08-27 15:38:12 +0100394 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
395 const armnn::ITensorHandleFactory& tensorHandleFactory)
Teresa Charlinecb6b8e2020-05-22 18:08:23 +0100396{
397 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Signed32>(
Keith Davis33a626f2020-08-27 15:38:12 +0100398 workloadFactory, memoryManager, 1.f, 0, tensorHandleFactory);
Teresa Charlinecb6b8e2020-05-22 18:08:23 +0100399}
400
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100401LayerTestResult<uint8_t, 4> AdditionUint8Test(
402 armnn::IWorkloadFactory& workloadFactory,
Keith Davis33a626f2020-08-27 15:38:12 +0100403 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
404 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100405{
406 const unsigned int shape0[] = { 1, 2, 2, 3 };
407 const unsigned int shape1[] = { 1, 2, 2, 3 };
408
409 std::vector<uint8_t> input0(
410 {
411 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
412 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
413 });
414
415 std::vector<uint8_t> input1(
416 {
417 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
418 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
419 });
420
421 std::vector<uint8_t> output(
422 {
423 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
424 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
425 });
426
Derek Lambertif90c56d2020-01-10 17:14:08 +0000427 return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QAsymmU8>(
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100428 workloadFactory,
429 memoryManager,
430 shape0,
431 input0,
432 7.0f,
433 3,
434 shape1,
435 input1,
436 7.0f,
437 3,
438 shape0,
439 output,
Keith Davis33a626f2020-08-27 15:38:12 +0100440 tensorHandleFactory,
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100441 7.0f,
442 3);
443}
444
445LayerTestResult<int16_t, 4> AdditionInt16Test(
446 armnn::IWorkloadFactory& workloadFactory,
Keith Davis33a626f2020-08-27 15:38:12 +0100447 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
448 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100449{
450 const unsigned int shape0[] = { 1, 2, 2, 3 };
451 const unsigned int shape1[] = { 1, 2, 2, 3 };
452
453 std::vector<int16_t> input0 =
454 {
455 63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 184
456 203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
457 };
458
459 std::vector<int16_t> input1 =
460 {
461 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
462 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
463 };
464
465 std::vector<int16_t> output =
466 {
467 84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107(clamped), 1617, 2254(clamped)
468 329, 189, 315, 189, 350, 217, // 2303(clamped), 1323, 2205(clamped), 1323, 2450(clamped), 1519
469 };
470
Derek Lambertif90c56d2020-01-10 17:14:08 +0000471 return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QSymmS16>(
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100472 workloadFactory,
473 memoryManager,
474 shape0,
475 input0,
476 7.0f,
477 0,
478 shape1,
479 input1,
480 7.0f,
481 0,
482 shape0,
483 output,
Keith Davis33a626f2020-08-27 15:38:12 +0100484 tensorHandleFactory,
Aron Virginas-Tare89ebad2019-08-27 18:14:26 +0100485 7.0f,
486 0);
487}
488
Teresa Charlinecb6b8e2020-05-22 18:08:23 +0100489LayerTestResult<int32_t, 4> AdditionInt32Test(
490 armnn::IWorkloadFactory& workloadFactory,
Keith Davis33a626f2020-08-27 15:38:12 +0100491 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
492 const armnn::ITensorHandleFactory& tensorHandleFactory)
Teresa Charlinecb6b8e2020-05-22 18:08:23 +0100493{
494 const unsigned int shape0[] = { 1, 2, 2, 3 };
495 const unsigned int shape1[] = { 1, 2, 2, 3 };
496
497 std::vector<int32_t> input0 =
498 {
499 63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 184
500 203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
501 };
502
503 std::vector<int32_t> input1 =
504 {
505 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
506 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
507 };
508
509 std::vector<int32_t> output =
510 {
511 84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107(clamped), 1617, 2254(clamped)
512 329, 189, 315, 189, 350, 217, // 2303(clamped), 1323, 2205(clamped), 1323, 2450(clamped), 1519
513 };
514
515 return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Signed32>(
516 workloadFactory,
517 memoryManager,
518 shape0,
519 input0,
520 1.0f,
521 0,
522 shape1,
523 input1,
524 1.0f,
525 0,
526 shape0,
527 output,
Keith Davis33a626f2020-08-27 15:38:12 +0100528 tensorHandleFactory,
Teresa Charlinecb6b8e2020-05-22 18:08:23 +0100529 1.0f,
530 0);
531}
532
// Chains two workloads: a 1x1/stride-2x2 max pool over a 3x3 tensor, followed
// by an addition of the pooled result with a second 2x2 tensor. Verifies that
// a workload output handle can feed a subsequent workload's input.
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    IgnoreUnused(memoryManager);

    // Create Initial Tensor
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
                                                            {1, 2, 3,
                                                             4, 5, 6,
                                                             7, 8, 9
                                                            });
    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
            tensorHandleFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
            tensorHandleFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool poolSize = 1x1, stride=2x2
    // Result =
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
    // Scratch buffer shaped like the pooling output; see NOTE(review) below on
    // how it is filled.
    auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);


    // Create addition with another tensor the same size
    // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
    // with the initial tensor.
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
    armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
                                                                    {12, 16,
                                                                     24, 28,
                                                                    });

    // Expected output tensor after MaxPool and Addition.
    LayerTestResult<float,4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
            {
                    13, 19,
                    31, 37
            }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = tensorHandleFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle =
            tensorHandleFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    // NOTE(review): this read happens BEFORE workload->Execute() below, so
    // resultMaxPool holds whatever the freshly allocated output buffer contains,
    // and the copy back into poolingOutputHandle is then overwritten by the
    // pooling workload's Execute(). The round-trip looks redundant — confirm
    // whether it is intentional (e.g. to exercise the copy paths).
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    // Run the max pool first, then the addition that consumes its output.
    workload->PostAllocationConfigure();
    workload->Execute();
    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}
640
// Runs the same addition on the backend under test and on a reference
// workload factory, with identical random inputs, and returns both results
// (backend result in `output`, reference result in `outputExpected`) so the
// caller can compare them.
LayerTestResult<float,4> CompareAdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory)
{
    IgnoreUnused(memoryManager);
    unsigned int batchSize = 4;
    unsigned int channels  = 1;
    unsigned int height    = 2;
    unsigned int width     = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    // Fixed seeds (1232, 456) keep the random inputs reproducible across runs.
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float,4> ret(outputTensorInfo);

    // Handles for the backend under test.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // Handles for the reference backend.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // Clone the descriptor for the reference run, then rebind its handles.
    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    // Feed the same input data to both backends.
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    // Backend result goes to `output`, reference result to `outputExpected`.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}