//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "AdditionTestImpl.hpp"

#include "ElementwiseTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <reference/test/RefWorkloadFactoryHelper.hpp>

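// Specialisation of the CreateWorkload helper for Addition; ElementwiseTestHelper is
// expected to call this to build the workload from the backend's workload factory,
// forwarding the queue descriptor and workload info unchanged.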
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::AdditionQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::AdditionQueueDescriptor& descriptor)
{
    return workloadFactory.CreateAddition(descriptor, info);
}

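// Element-wise addition of two 2x2x2x3 (NCHW) float tensors of the same shape; each
// expected output value is the sum of the corresponding input values.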
LayerTestResult<float,4> AdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    unsigned int batchSize = 2u;
    unsigned int channels = 2u;
    unsigned int height = 2u;
    unsigned int width = 3u;

    unsigned int shape[] = { batchSize, channels, height, width };

    std::vector<float> input1 =
    {
        0.0f, 2.0f, 1.0f,
        0.2f, 1.0f, 2.0f,

        1.0f, 2.0f, 1.0f,
        0.2f, 1.0f, 2.0f,

        0.0f, 2.0f, 1.0f,
        4.2f, 1.0f, 2.0f,

        0.0f, 0.0f, 1.0f,
        0.2f, 1.0f, 2.0f,
    };

    std::vector<float> input2 =
    {
        1.0f, 2.0f, 1.0f,
        0.0f, 1.0f, 2.0f,

        1.0f, 2.0f, -2.0f,
        0.2f, 1.0f, 2.0f,

        0.0f, 2.0f, 1.0f,
        4.2f, 0.0f, -3.0f,

        0.0f, 0.0f, 1.0f,
        0.7f, 1.0f, 5.0f,
    };

    std::vector<float> output =
    {
        1.0f, 4.0f, 2.0f,
        0.2f, 2.0f, 4.0f,

        2.0f, 4.0f, -1.0f,
        0.4f, 2.0f, 4.0f,

        0.0f, 4.0f, 2.0f,
        8.4f, 1.0f, -1.0f,

        0.0f, 0.0f, 2.0f,
        0.9f, 2.0f, 7.0f,
    };

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        shape,
        input1,
        shape,
        input2,
        shape,
        output,
        tensorHandleFactory);
}

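// The same element-wise check on rank-5 tensors of shape {2, 2, 2, 2, 3}, to exercise
// addition above four dimensions.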
LayerTestResult<float, 5> Addition5dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    unsigned int depth = 2u;
    unsigned int batchSize = 2u;
    unsigned int channels = 2u;
    unsigned int height = 2u;
    unsigned int width = 3u;

    unsigned int shape[] = { depth, batchSize, channels, height, width };

    std::vector<float> input1 =
    {
        2.6f, 4.0f, 4.4f, 2.7f, 4.6f, 2.8f,
        2.3f, 1.9f, 3.4f, 2.9f, 2.2f, 4.5f,

        2.8f, 1.9f, 2.3f, 2.6f, 4.7f, 3.5f,
        0.4f, 1.5f, 2.1f, 0.7f, 5.0f, 1.1f,


        1.0f, 2.7f, 0.0f, 0.6f, 0.8f, 0.9f,
        1.0f, 2.6f, 0.4f, 3.8f, 0.4f, 0.8f,

        0.5f, 4.3f, 3.1f, 4.4f, 0.7f, 1.4f,
        0.4f, 4.4f, 0.7f, 0.6f, 4.7f, 1.2f,
    };

    std::vector<float> input2 =
    {
        4.4f, 3.0f, 1.0f, 0.0f, 3.9f, 3.1f,
        1.7f, 2.9f, 1.3f, 0.4f, 0.4f, 4.3f,

        4.5f, 0.2f, 2.2f, 4.1f, 3.9f, 3.0f,
        0.1f, 2.5f, 4.1f, 4.6f, 1.5f, 0.0f,


        0.5f, 4.9f, 2.5f, 1.5f, 3.4f, 4.5f,
        2.0f, 3.0f, 4.9f, 1.6f, 2.4f, 3.4f,

        3.6f, 1.8f, 1.3f, 2.6f, 2.1f, 4.8f,
        2.0f, 4.3f, 4.0f, 0.2f, 0.6f, 4.4f,
    };

    std::vector<float> output =
    {
        7.0f, 7.0f, 5.4f, 2.7f, 8.5f, 5.9f,
        4.0f, 4.8f, 4.7f, 3.3f, 2.6f, 8.8f,

        7.3f, 2.1f, 4.5f, 6.7f, 8.6f, 6.5f,
        0.5f, 4.0f, 6.2f, 5.3f, 6.5f, 1.1f,


        1.5f, 7.6f, 2.5f, 2.1f, 4.2f, 5.4f,
        3.0f, 5.6f, 5.3f, 5.4f, 2.8f, 4.2f,

        4.1f, 6.1f, 4.4f, 7.0f, 2.8f, 6.2f,
        2.4f, 8.7f, 4.7f, 0.8f, 5.3f, 5.6f,
    };

    return ElementwiseTestHelper<5, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        shape,
        input1,
        shape,
        input2,
        shape,
        output,
        tensorHandleFactory);
}

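// Broadcast test: a {1,3,2,1} tensor and a {1,1,2,3} tensor are both broadcast to the
// {1,3,2,3} output shape. input1 holds one value per (channel, row), broadcast along the
// width; input2 holds a single 2x3 (height x width) block, broadcast along the channels.
// When T is a quantised type the given qScale/qOffset are applied to all three tensors.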
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    IgnoreUnused(memoryManager);
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = armnnUtils::QuantizedVector<T>(
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        },
        qScale, qOffset);

    auto input2 = armnnUtils::QuantizedVector<T>(
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        },
        qScale, qOffset);

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    auto expectedOutput = armnnUtils::QuantizedVector<T>(
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        },
        qScale, qOffset);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), input1.data());
    CopyDataToITensorHandle(inputHandle2.get(), input2.data());

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 expectedOutput,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}

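// Broadcast of a single element: a {1,1,1,1} tensor holding 0.5 is added to every
// element of a {1,3,2,3} tensor, i.e. a scalar-plus-tensor addition.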
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    IgnoreUnused(memoryManager);
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = armnnUtils::QuantizedVector<T>(
        {
            0.0f, 1.0f, 2.0f,
            3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f,
            9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        },
        qScale, qOffset);

    auto input2 = armnnUtils::QuantizedVector<T>(
        {
            0.5f,
        },
        qScale, qOffset);

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    auto expectedOutput = armnnUtils::QuantizedVector<T>(
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
            6.5f, 7.5f, 8.5f,
            9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        },
        qScale, qOffset);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), input1.data());
    CopyDataToITensorHandle(inputHandle2.get(), input2.data());

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 expectedOutput,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}

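// Per-type wrappers around the two broadcast implementations above, passing a
// representative quantisation scale/offset for the quantised data types.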
LayerTestResult<float, 4> AdditionBroadcastTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcastTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, 2.f, 0, tensorHandleFactory);
}

LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcastTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, 2.f, 0, tensorHandleFactory);
}

LayerTestResult<int32_t, 4> AdditionBroadcastInt32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcastTestImpl<armnn::DataType::Signed32>(
        workloadFactory, memoryManager, 1.f, 0, tensorHandleFactory);
}

LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.1333333f, 128, tensorHandleFactory);
}

LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, 0.1333333f, 0, tensorHandleFactory);
}

LayerTestResult<int32_t, 4> AdditionBroadcast1ElementInt32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::Signed32>(
        workloadFactory, memoryManager, 1.f, 0, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> AdditionUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<uint8_t> input0(
    {
        63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
        203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
    });

    std::vector<uint8_t> input1(
    {
        21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
        126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
    });

    std::vector<uint8_t> output(
    {
        81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
        255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
    });

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        7.0f,
        3,
        shape1,
        input1,
        7.0f,
        3,
        shape0,
        output,
        tensorHandleFactory,
        7.0f,
        3);
}

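// Symmetric int16 (QSymmS16) addition with scale 7.0 and zero offset: a stored value q
// represents 7 * q, so e.g. 63 -> 441 and 21 -> 147, giving 588, which requantises back
// to 84. The int16 range is wide enough that none of these sums saturate.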
LayerTestResult<int16_t, 4> AdditionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int16_t> input0 =
    {
        63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 784
        203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
    };

    std::vector<int16_t> input1 =
    {
        21, 7, 175, 231, 175, 210, // 147, 49, 1225, 1617, 1225, 1470
        126, 161, 63, 21, 105, 126 // 882, 1127, 441, 147, 735, 882
    };

    std::vector<int16_t> output =
    {
        84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107, 1617, 2254
        329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519
    };

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        7.0f,
        0,
        shape1,
        input1,
        7.0f,
        0,
        shape0,
        output,
        tensorHandleFactory,
        7.0f,
        0);
}

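// Signed 32-bit addition: with a scale of 1.0 and a zero offset the stored integers are
// used as-is, so each expected output is simply the element-wise sum of the inputs.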
LayerTestResult<int32_t, 4> AdditionInt32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int32_t> input0 =
    {
        63, 35, 77, 70, 56, 112,
        203, 28, 252, 168, 245, 91
    };

    std::vector<int32_t> input1 =
    {
        21, 7, 175, 231, 175, 210,
        126, 161, 63, 21, 105, 126
    };

    std::vector<int32_t> output =
    {
        84, 42, 252, 301, 231, 322, // e.g. 84 = 63 + 21
        329, 189, 315, 189, 350, 217,
    };

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        1.0f,
        0,
        shape1,
        input1,
        1.0f,
        0,
        shape0,
        output,
        tensorHandleFactory,
        1.0f,
        0);
}

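// Chains two workloads: a 1x1 max pool with stride 2 picks the four corners of a 3x3
// tensor ({1, 3, 7, 9}), and an Addition workload then adds a constant 2x2 tensor
// ({12, 16, 24, 28}) to that result, giving {13, 19, 31, 37}.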
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    IgnoreUnused(memoryManager);

    // Create the initial tensor:
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3 }, armnn::DataType::Float32);
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);

    std::vector<float> poolingInput = { 1, 2, 3,
                                        4, 5, 6,
                                        7, 8, 9 };

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        tensorHandleFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        tensorHandleFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool with poolSize = 1x1 and stride = 2x2.
    // Result:
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool workload.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    std::vector<float> resultMaxPool(poolingOutputTensorInfo.GetNumElements());

    // Create an addition with another tensor of the same size. This tensor is the result
    // of applying a Conv2d with a 2x2 kernel of ones and stride 1x1 to the initial tensor:
    // 12, 16
    // 24, 28
    armnn::TensorInfo addInputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);
    armnn::TensorInfo addOutputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);

    std::vector<float> addInput = { 12, 16,
                                    24, 28 };

    // Expected output tensor after MaxPool and Addition.
    std::vector<float> actualOutput(addOutputTensorInfo.GetNumElements());
    std::vector<float> expectedOutput = { 13, 19,
                                          31, 37 };

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = tensorHandleFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = tensorHandleFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor.
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), poolingInput.data());
    CopyDataFromITensorHandle(resultMaxPool.data(), poolingOutputHandle.get());

    CopyDataToITensorHandle(poolingOutputHandle.get(), resultMaxPool.data());
    CopyDataToITensorHandle(addInputHandle.get(), addInput.data());

    workload->PostAllocationConfigure();
    workload->Execute();
    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), addOutputHandle.get());

    return LayerTestResult<float, 4>(actualOutput,
                                     expectedOutput,
                                     addOutputHandle->GetShape(),
                                     addOutputTensorInfo.GetShape());
}

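// Runs the same addition on two backends: random inputs are fed both to the workload
// factory under test and to a reference workload factory, and the reference result is
// used as the expected output.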
LayerTestResult<float,4> CompareAdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory)
{
    IgnoreUnused(memoryManager);
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeRandomTensor<float>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float>(inputTensorInfo2, 456);

    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
    std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), input1.data());
    CopyDataToITensorHandle(inputHandle2.get(), input2.data());
    CopyDataToITensorHandle(inputHandle1Ref.get(), input1.data());
    CopyDataToITensorHandle(inputHandle2Ref.get(), input2.data());

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
    CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());

    return LayerTestResult<float, 4>(actualOutput,
                                     expectedOutput,
                                     outputHandle->GetShape(),
                                     outputTensorInfo.GetShape());
}