//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "AdditionTestImpl.hpp"

#include "ElementwiseTestImpl.hpp"

#include <armnnUtils/QuantizeHelper.hpp>
#include <reference/test/RefWorkloadFactoryHelper.hpp>

template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::AdditionQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::AdditionQueueDescriptor& descriptor)
{
    return workloadFactory.CreateWorkload(armnn::LayerType::Addition, descriptor, info);
}

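// Adds two Float32 tensors of identical shape { 2, 2, 2, 3 } element by element;
// no broadcasting is involved, so each output value is simply input1[i] + input2[i].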
LayerTestResult<float,4> AdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    unsigned int batchSize = 2u;
    unsigned int channels = 2u;
    unsigned int height = 2u;
    unsigned int width = 3u;

    unsigned int shape[] = { batchSize, channels, height, width };

    std::vector<float> input1 =
    {
        0.0f, 2.0f, 1.0f,
        0.2f, 1.0f, 2.0f,

        1.0f, 2.0f, 1.0f,
        0.2f, 1.0f, 2.0f,

        0.0f, 2.0f, 1.0f,
        4.2f, 1.0f, 2.0f,

        0.0f, 0.0f, 1.0f,
        0.2f, 1.0f, 2.0f,
    };

    std::vector<float> input2 =
    {
        1.0f, 2.0f, 1.0f,
        0.0f, 1.0f, 2.0f,

        1.0f, 2.0f, -2.0f,
        0.2f, 1.0f, 2.0f,

        0.0f, 2.0f, 1.0f,
        4.2f, 0.0f, -3.0f,

        0.0f, 0.0f, 1.0f,
        0.7f, 1.0f, 5.0f,
    };

    std::vector<float> output =
    {
        1.0f, 4.0f, 2.0f,
        0.2f, 2.0f, 4.0f,

        2.0f, 4.0f, -1.0f,
        0.4f, 2.0f, 4.0f,

        0.0f, 4.0f, 2.0f,
        8.4f, 1.0f, -1.0f,

        0.0f, 0.0f, 2.0f,
        0.9f, 2.0f, 7.0f,
    };

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        shape,
        input1,
        shape,
        input2,
        shape,
        output,
        tensorHandleFactory);
}

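// Same element-wise addition as above, but on 5-D Float32 tensors of shape
// { 2, 2, 2, 2, 3 } to exercise tensor ranks above 4.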
LayerTestResult<float, 5> Addition5dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    unsigned int depth = 2u;
    unsigned int batchSize = 2u;
    unsigned int channels = 2u;
    unsigned int height = 2u;
    unsigned int width = 3u;

    unsigned int shape[] = { depth, batchSize, channels, height, width };

    std::vector<float> input1 =
    {
        2.6f, 4.0f, 4.4f, 2.7f, 4.6f, 2.8f,
        2.3f, 1.9f, 3.4f, 2.9f, 2.2f, 4.5f,

        2.8f, 1.9f, 2.3f, 2.6f, 4.7f, 3.5f,
        0.4f, 1.5f, 2.1f, 0.7f, 5.0f, 1.1f,


        1.0f, 2.7f, 0.0f, 0.6f, 0.8f, 0.9f,
        1.0f, 2.6f, 0.4f, 3.8f, 0.4f, 0.8f,

        0.5f, 4.3f, 3.1f, 4.4f, 0.7f, 1.4f,
        0.4f, 4.4f, 0.7f, 0.6f, 4.7f, 1.2f,
    };

    std::vector<float> input2 =
    {
        4.4f, 3.0f, 1.0f, 0.0f, 3.9f, 3.1f,
        1.7f, 2.9f, 1.3f, 0.4f, 0.4f, 4.3f,

        4.5f, 0.2f, 2.2f, 4.1f, 3.9f, 3.0f,
        0.1f, 2.5f, 4.1f, 4.6f, 1.5f, 0.0f,


        0.5f, 4.9f, 2.5f, 1.5f, 3.4f, 4.5f,
        2.0f, 3.0f, 4.9f, 1.6f, 2.4f, 3.4f,

        3.6f, 1.8f, 1.3f, 2.6f, 2.1f, 4.8f,
        2.0f, 4.3f, 4.0f, 0.2f, 0.6f, 4.4f,
    };

    std::vector<float> output =
    {
        7.0f, 7.0f, 5.4f, 2.7f, 8.5f, 5.9f,
        4.0f, 4.8f, 4.7f, 3.3f, 2.6f, 8.8f,

        7.3f, 2.1f, 4.5f, 6.7f, 8.6f, 6.5f,
        0.5f, 4.0f, 6.2f, 5.3f, 6.5f, 1.1f,


        1.5f, 7.6f, 2.5f, 2.1f, 4.2f, 5.4f,
        3.0f, 5.6f, 5.3f, 5.4f, 2.8f, 4.2f,

        4.1f, 6.1f, 4.4f, 7.0f, 2.8f, 6.2f,
        2.4f, 8.7f, 4.7f, 0.8f, 5.3f, 5.6f,
    };

    return ElementwiseTestHelper<5, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        shape,
        input1,
        shape,
        input2,
        shape,
        output,
        tensorHandleFactory);
}

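// Adds a { 1, 3, 2, 1 } tensor to a { 1, 1, 2, 3 } tensor, broadcasting both inputs to the
// { 1, 3, 2, 3 } output shape. The workload is built by hand (rather than via
// ElementwiseTestHelper) so that the two differently shaped inputs can be supplied directly.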
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    IgnoreUnused(memoryManager);
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = armnnUtils::QuantizedVector<T>(
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        },
        qScale, qOffset);

    auto input2 = armnnUtils::QuantizedVector<T>(
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        },
        qScale, qOffset);

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    auto expectedOutput = armnnUtils::QuantizedVector<T>(
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        },
        qScale, qOffset);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Addition,
                                                                                data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), input1.data());
    CopyDataToITensorHandle(inputHandle2.get(), input2.data());

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 expectedOutput,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}

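// Adds a single-element { 1, 1, 1, 1 } tensor to every element of a { 1, 3, 2, 3 } tensor,
// i.e. a scalar broadcast of 0.5 across the whole input.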
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    IgnoreUnused(memoryManager);
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = armnnUtils::QuantizedVector<T>(
        {
             0.0f,  1.0f,  2.0f,
             3.0f,  4.0f,  5.0f,
             6.0f,  7.0f,  8.0f,
             9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        },
        qScale, qOffset);

    auto input2 = armnnUtils::QuantizedVector<T>(
        {
            0.5f,
        },
        qScale, qOffset);

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    auto expectedOutput = armnnUtils::QuantizedVector<T>(
        {
             0.5f,  1.5f,  2.5f,
             3.5f,  4.5f,  5.5f,
             6.5f,  7.5f,  8.5f,
             9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        },
        qScale, qOffset);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Addition,
                                                                                data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), input1.data());
    CopyDataToITensorHandle(inputHandle2.get(), input2.data());

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 expectedOutput,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}

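// Typed wrappers that instantiate the two-input broadcast test for Float32 and for the
// quantized QAsymmU8, QSymmS16 and Signed32 data types with their respective scale/offset settings.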
LayerTestResult<float, 4> AdditionBroadcastTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcastTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, 2.f, 0, tensorHandleFactory);
}

LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcastTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, 2.f, 0, tensorHandleFactory);
}

LayerTestResult<int32_t, 4> AdditionBroadcastInt32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcastTestImpl<armnn::DataType::Signed32>(
        workloadFactory, memoryManager, 1.f, 0, tensorHandleFactory);
}

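// Typed wrappers that instantiate the single-element broadcast test for Float32, QAsymmU8,
// QSymmS16 and Signed32, each with its own quantization scale and offset.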
LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.1333333f, 128, tensorHandleFactory);
}

LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, 0.1333333f, 0, tensorHandleFactory);
}

LayerTestResult<int32_t, 4> AdditionBroadcast1ElementInt32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::Signed32>(
        workloadFactory, memoryManager, 1.f, 0, tensorHandleFactory);
}

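// Element-wise addition on QAsymmU8 data. Inputs and output use scale 7.0 and offset 3, so a
// stored value q represents the real value 7 * (q - 3); e.g. 63 -> 420. The per-line comments
// give these real values, and sums whose requantized result would exceed 255 saturate to 255,
// marked "(clamped)".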
LayerTestResult<uint8_t, 4> AdditionUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<uint8_t> input0(
    {
        63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
        203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
    });

    std::vector<uint8_t> input1(
    {
        21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
        126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
    });

    std::vector<uint8_t> output(
    {
        81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
        255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
    });

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        7.0f,
        3,
        shape1,
        input1,
        7.0f,
        3,
        shape0,
        output,
        tensorHandleFactory,
        7.0f,
        3);
}

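// The same addition on QSymmS16 data with scale 7.0 and offset 0, so a stored value q
// represents 7 * q. The int16 range is wide enough that none of these sums saturate.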
LayerTestResult<int16_t, 4> AdditionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int16_t> input0 =
    {
        63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 784
        203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
    };

    std::vector<int16_t> input1 =
    {
        21, 7, 175, 231, 175, 210, // 147, 49, 1225, 1617, 1225, 1470
        126, 161, 63, 21, 105, 126 // 882, 1127, 441, 147, 735, 882
    };

    std::vector<int16_t> output =
    {
        84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107, 1617, 2254
        329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519
    };

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        7.0f,
        0,
        shape1,
        input1,
        7.0f,
        0,
        shape0,
        output,
        tensorHandleFactory,
        7.0f,
        0);
}

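// The same addition on Signed32 data. The quantization scale is 1.0 with offset 0, so the
// stored integers are the real values and every sum is exact.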
LayerTestResult<int32_t, 4> AdditionInt32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int32_t> input0 =
    {
        63, 35, 77, 70, 56, 112,
        203, 28, 252, 168, 245, 91
    };

    std::vector<int32_t> input1 =
    {
        21, 7, 175, 231, 175, 210,
        126, 161, 63, 21, 105, 126
    };

    std::vector<int32_t> output =
    {
        84, 42, 252, 301, 231, 322,
        329, 189, 315, 189, 350, 217,
    };

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        1.0f,
        0,
        shape1,
        input1,
        1.0f,
        0,
        shape0,
        output,
        tensorHandleFactory,
        1.0f,
        0);
}

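// Runs a 1x1 max pooling (stride 2x2) over a 3x3 input and then adds a second 2x2 tensor to
// the pooled result, checking that the output of one workload can feed an Addition workload
// on the same backend.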
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    IgnoreUnused(memoryManager);

    // Create the initial tensor:
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3 }, armnn::DataType::Float32);
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);

    std::vector<float> poolingInput = {1, 2, 3,
                                       4, 5, 6,
                                       7, 8, 9
                                      };
    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        tensorHandleFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        tensorHandleFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool with poolSize = 1x1 and stride = 2x2. Result:
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool workload.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pooling2d,
                                                                                queueDescriptor,
                                                                                workloadInfo);

    std::vector<float> resultMaxPool(poolingOutputTensorInfo.GetNumElements());

    // Create an addition with a second tensor of the same size. This is the result of applying
    // a Conv2d with a 2x2 kernel of ones and stride 1x1 to the initial tensor:
    // 12, 16
    // 24, 28
    armnn::TensorInfo addInputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);
    armnn::TensorInfo addOutputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);

    std::vector<float> addInput = { 12, 16,
                                    24, 28 };

    // Expected output tensor after MaxPool and Addition.
    std::vector<float> actualOutput(addOutputTensorInfo.GetNumElements());
    std::vector<float> expectedOutput = { 13, 19,
                                          31, 37 };

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = tensorHandleFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = tensorHandleFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor.
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateWorkload(armnn::LayerType::Addition,
                                                                                   data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), poolingInput.data());
    CopyDataFromITensorHandle(resultMaxPool.data(), poolingOutputHandle.get());

    CopyDataToITensorHandle(poolingOutputHandle.get(), resultMaxPool.data());
    CopyDataToITensorHandle(addInputHandle.get(), addInput.data());

    workload->PostAllocationConfigure();
    workload->Execute();
    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), addOutputHandle.get());

    return LayerTestResult<float, 4>(actualOutput,
                                     expectedOutput,
                                     addOutputHandle->GetShape(),
                                     addOutputTensorInfo.GetShape());
}

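// Runs the same random-input addition through the workload factory under test and through a
// reference workload factory, using the reference result as the expected output.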
LayerTestResult<float,4> CompareAdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory)
{
    IgnoreUnused(memoryManager);
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeRandomTensor<float>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float>(inputTensorInfo2, 456);

    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
    std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Addition,
                                                                                data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateWorkload(armnn::LayerType::Addition,
                                                                                      refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), input1.data());
    CopyDataToITensorHandle(inputHandle2.get(), input2.data());
    CopyDataToITensorHandle(inputHandle1Ref.get(), input1.data());
    CopyDataToITensorHandle(inputHandle2Ref.get(), input2.data());

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
    CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());

    return LayerTestResult<float, 4>(actualOutput,
                                     expectedOutput,
                                     outputHandle->GetShape(),
                                     outputTensorInfo.GetShape());
}