blob: dad13413b4cc3225d8acb8b3a2e66535f2826bd0 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
telsoa014fcda012018-03-09 14:13:49 +00007
8#include "test/TensorHelpers.hpp"
9#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010010#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000011
12#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010013#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000014
David Beck711fa312018-09-24 10:46:38 +010015#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000017#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000018#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000019#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000020
telsoa014fcda012018-03-09 14:13:49 +000021#include <algorithm>
22#include <boost/cast.hpp>
23
24#include "WorkloadTestUtils.hpp"
25#include "Conv2dTestImpl.hpp"
26#include "BatchNormTestImpl.hpp"
27#include "ActivationTestImpl.hpp"
28#include "Pooling2dTestImpl.hpp"
29#include "ReshapeTestImpl.hpp"
30#include "FullyConnectedTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000031#include "SpaceToBatchNdTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000032#include "SplitterTestImpl.hpp"
33#include "SoftmaxTestImpl.hpp"
34#include "NormTestImpl.hpp"
35#include "PermuteTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010036#include "LstmTestImpl.hpp"
37#include "ConvertFp16ToFp32TestImpl.hpp"
38#include "ConvertFp32ToFp16TestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000039
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// Laid out channel-major (NCHW interior): 8 rows of 16 per channel.
// Channel 0: a row of zeros between rows of 0.5f; channel 1: a single vertical
// line of 1s; channel 2: all -1s. Chosen so each kernel channel produces a
// distinct, easily hand-checked contribution.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});
67
// 2-channel bias used by a number of Conv2d tests: channel 0 gets no bias,
// channel 1 gets +2 (quantized per test via GetBias2 below).
static std::vector<float> Bias2({0, 2});
70
jimfly013aab7c32018-11-12 13:32:08 +000071armnn::TensorShape GetTestTensorShape(unsigned int numberOfBatches,
72 unsigned int numberOfChannels,
73 unsigned int height,
74 unsigned int width,
75 const armnn::DataLayoutIndexed& dataLayout)
76{
77 switch (dataLayout.GetDataLayout())
78 {
79 case armnn::DataLayout::NCHW:
80 return armnn::TensorShape({numberOfBatches, numberOfChannels, height, width});
81 case armnn::DataLayout::NHWC:
82 return armnn::TensorShape({numberOfBatches, height, width, numberOfChannels});
83 default:
84 throw armnn::InvalidArgumentException("unknown data layout ["
85 + std::to_string(static_cast<int>(dataLayout.GetDataLayout())) + "]");
86 }
87}
88
telsoa01c577f2c2018-08-31 09:22:23 +010089// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
telsoa014fcda012018-03-09 14:13:49 +000090template<typename T>
91boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
92{
93 if(biasEnabled)
94 {
95 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, armnn::GetDataType<T>());
96 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
97 return bias;
98 }
99 else
100 {
101 return boost::multi_array<T, 1>();
102 }
103}
104
// Convolves the common 3-channel 16x8 input with a 2-element batch of
// 3-channel 3x5 kernels and checks the result against a hand-computed
// expected output. Values are quantized with qScale/qOffset for integer T;
// layout selects NCHW vs NHWC handling inside the shared implementation.
template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayoutIndexed& layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    // Kernel 0 channels: [+1s with a -1 centre, zeros, +2s]; kernel 1: [zeros, +1s, zeros].
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x4 image (shape {1, 2, 4, 14}),
    // one output channel per kernel in the batch.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    // Bias type differs from T for quantized inputs (e.g. int32 bias for uint8 data).
    return SimpleConvolution2dTestImpl<T>(workloadFactory,
      memoryManager,
      input,
      kernel,
      GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
      expectedOutput,
      qScale,
      qOffset,
      layout);
}
187
// Same scenario as SimpleConvolution2d3x5TestCommon but with 3x3 kernels,
// which exercises ArmCompute's direct convolution path.
template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayoutIndexed& layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    // Kernel 0 channels: [+1s with a -1 centre, zeros, +2s]; kernel 1: [zeros, +1s, zeros].
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image (shape {1, 2, 6, 14}).
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    // Bias type differs from T for quantized inputs (e.g. int32 bias for uint8 data).
    return SimpleConvolution2dTestImpl<T>(workloadFactory,
      memoryManager,
      input,
      kernel,
      GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
      expectedOutput,
      qScale,
      qOffset,
      layout);
}
262
// NHWC convolution test: a single-channel 3x4 image convolved with one 3x3
// kernel (same-padding semantics provided by the NHWC impl). No bias is used;
// qScale/qOffset quantize values for integer T.
template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Single-batch 1-channel 3x4 image in NHWC order (shape {1, 3, 4, 1}).
    armnn::TensorInfo inputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
                                                      {
                                                          1, 5, 2, 3,
                                                          8, 7, 3, 6,
                                                          3, 3, 9, 1
                                                      });


    // A single 1-channel 3x3 kernel (shape {1, 3, 3, 1} in NHWC order).
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
                                                                    4, 5, 6,
                                                                    0, 0, 0,
                                                                    3, 2, 1
                                                                    });

    // Expected output is 1 batch of a 1-channel 3x4 image (shape {1, 3, 4, 1}).
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());

    const std::vector<float> outputData =
    {
        23, 41, 33, 21,
        44, 65, 76, 52,
        82, 85, 79, 42
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    // NOTE(review): biasEnabled is accepted but an empty bias tensor is always
    // passed below — confirm whether biased NHWC coverage was intended.
    return SimpleConvolution2dNhwcTestImpl<T>(workloadFactory,
                                              memoryManager,
                                              input,
                                              kernel,
                                              boost::multi_array<T, 1>(),
                                              expectedOutput,
                                              dataLayout,
                                              qScale,
                                              qOffset);
}
313
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000314LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
315 armnn::IWorkloadFactory& workloadFactory,
316 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
317 bool biasEnabled,
318 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +0000319{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000320 return SimpleConvolution2d3x5TestCommon<float>(workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000321}
322
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000323LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
324 armnn::IWorkloadFactory& workloadFactory,
325 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
326 bool biasEnabled,
327 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +0000328{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000329 return SimpleConvolution2d3x5TestCommon<uint8_t>(workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000330}
331
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000332LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
333 armnn::IWorkloadFactory& workloadFactory,
334 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
335 bool biasEnabled,
336 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +0000337{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000338 return SimpleConvolution2d3x3TestCommon<float>(workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000339}
340
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000341LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
342 armnn::IWorkloadFactory& workloadFactory,
343 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
344 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100345{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000346 return SimpleConvolution2d3x3NhwcTestCommon<float>(workloadFactory,
347 memoryManager,
348 0.f,
349 0,
350 biasEnabled,
351 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100352}
353
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000354LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
355 armnn::IWorkloadFactory& workloadFactory,
356 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
357 bool biasEnabled,
358 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +0000359{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000360 return SimpleConvolution2d3x3TestCommon<uint8_t>(workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000361}
362
// Convolves a 1-channel 3x3 image with a 2x2 kernel using asymmetric padding
// (left 1, top 2, right 3, bottom 4) that exceeds half the kernel size in
// every direction — a case some backends mishandle. No bias is applied.
template<typename T>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayoutIndexed& layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,
            12,22,32,
            13,23,33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,
            -12,-22,
        })));

// Expected output is 1 batch of a 1-channel 6x8 image (shape {1, 1, 8, 6}).
// Manually calculated like this:
//[-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0 -12*0  -22*0 ..]
//[-11*0 -21*0  -12*0 -22*11 ; -11*0  -21*0  -12*11 -22*21 ; -11*0  -21*0  -12*21 -22*31 ; -11*0  -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0  ; -11*13 -21*23 -12*0  -22*0  ; -11*23 -21*33 -12*0  -22*0  ; -11*33 -21*0 -12*0  -22*0 ..]
//[-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0 -12*0  -22*0 ..]
//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
               0,    0,    0,    0,    0,    0,
            -242, -594, -934, -372,    0,    0,
            -495, -1190, -1850, -725,    0,    0,
            -538, -1256, -1916, -748,    0,    0,
            -273, -626, -946, -363,    0,    0,
               0,    0,    0,    0,    0,    0,
               0,    0,    0,    0,    0,    0,
               0,    0,    0,    0,    0,    0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
      memoryManager,
      input,
      kernel,
      GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
      expectedOutput,
      qScale,
      qOffset,
      layout,
      1,  // Padding left.
      2,  // Padding top.
      3,  // Padding right.
      4); // Padding bottom.
}
424
425template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000426LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
427 armnn::IWorkloadFactory& workloadFactory,
428 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
429 const armnn::DataLayoutIndexed& layout,
430 float qScale,
431 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000432{
telsoa01c577f2c2018-08-31 09:22:23 +0100433 // Use a single-batch 1-channel 5x5 image as input.
telsoa014fcda012018-03-09 14:13:49 +0000434 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
435 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
436 QuantizedVector<T>(qScale, qOffset, {
437 11,21,31,41,51,
438 12,22,32,42,52,
439 13,23,33,43,53,
440 14,24,34,44,54,
441 15,25,35,45,55,
442 })));
443
telsoa01c577f2c2018-08-31 09:22:23 +0100444 // Use 1 batch of a 1-channel 4x4 kernel.
telsoa014fcda012018-03-09 14:13:49 +0000445 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
446 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
447 QuantizedVector<T>(qScale, qOffset, {
448 -11,-21,-31,-41,
449 -12,-22,-32,-42,
450 -13,-23,-33,-43,
451 -14,-24,-34,-44,
452 })));
453
telsoa01c577f2c2018-08-31 09:22:23 +0100454 // Expected output is 1 batch of a 1-channel 5x5 image.
telsoa014fcda012018-03-09 14:13:49 +0000455 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
456 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
457 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
458 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000459 -7140, -10580, -13940, -9300, -5230,
460 -9590, -14120, -18520, -12290, -6860,
461 -9980, -14560, -18960, -12560, -7000,
462 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100463 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000464 })));
465
466 return SimpleConvolution2dTestImpl<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000467 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000468 input,
469 kernel,
470 GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
471 expectedOutput,
472 qScale,
473 qOffset,
narpra015f703182018-10-26 16:24:58 +0100474 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100475 1, // Padding left.
476 1, // Padding top.
477 2, // Padding right.
478 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100479}
480
// Depthwise convolution (depth multiplier 1) of a 2-channel 5x5 image with a
// 2-channel 4x4 kernel, asymmetric padding (left 1, top 1, right 2, bottom 2)
// and unit strides. Expected output was computed with TensorFlow.
//
// NOTE(review): the input/kernel/output data are quantized with each
// TensorInfo's own (default-constructed) scale/offset rather than the qScale/
// qOffset parameters, which are only forwarded to the bias and the impl —
// confirm this asymmetry is intentional.
template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayoutIndexed& layout)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the python tensorflow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,
            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dAsymmetricTestImpl<T>(workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1,  // Padding left.
        1,  // Padding top.
        2,  // Padding right.
        2,  // Padding bottom.
        1,  // strideX
        1); // strideY
}
555
// NHWC variant of the asymmetric depthwise test: the same 2-channel 5x5 image,
// 2-channel 4x4 kernel and expected results as
// DepthwiseConvolution2dAsymmetricTestCommon, but with the data interleaved
// channel-last. Padding (1,1,2,2) and unit strides.
//
// NOTE(review): as in the NCHW variant, data are quantized with each
// TensorInfo's own (default) scale/offset, not the qScale/qOffset parameters.
template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    // 1 batch, 5x5 spatial, 2 channels (NHWC): each row below is one pixel's
    // (channel0, channel1) pair.
    armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
             0, 25,
             1, 26,
             2, 27,
             3, 28,
             4, 29,

             5, 30,
             6, 31,
             7, 32,
             8, 33,
             9, 34,

            10, 35,
            11, 36,
            12, 37,
            13, 38,
            14, 39,

            15, 40,
            16, 41,
            17, 42,
            18, 43,
            19, 44,

            20, 45,
            21, 46,
            22, 47,
            23, 48,
            24, 49
        })));

    // 4x4 kernel with 2 channels, interleaved channel-last.
    armnn::TensorInfo kernelTensorInfo({ 1, 4, 4, 2}, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 16,
            31, 15,
            30, 14,
            29, 13,

            28, 12,
            27, 11,
            26, 10,
            25,  9,

            24,  8,
            23,  7,
            22,  6,
            21,  5,

            20,  4,
            19,  3,
            18,  2,
            17,  1
        })));

    // Expected output: same values as the NCHW asymmetric test, interleaved.
    armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1550,
            1580, 2284,
            1850, 2362,
            1530, 1955,
            1117, 1428,

            2140, 2910,
            3108, 4206,
            3500, 4342,
            2842, 3528,
            2042, 2536,

            3580, 3390,
            5068, 4886,
            5460, 5022,
            4342, 4068,
            3062, 2916,

            3618, 3566,
            5072, 5056,
            5390, 5182,
            4248, 4133,
            2971, 2922,

            3074, 3100,
            4282, 4352,
            4510, 4452,
            3533, 3517,
            2457, 2465
        })));

    return DepthwiseConvolution2dNhwcTestImpl<T>(workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        1,  // Padding left.
        1,  // Padding top.
        2,  // Padding right.
        2,  // Padding bottom.
        1,  // strideX
        1); // strideY
}
671
telsoa014fcda012018-03-09 14:13:49 +0000672LayerTestResult<float, 4>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000673Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
674 armnn::IWorkloadFactory& workloadFactory,
675 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
676 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +0000677{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000678 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon<float>(
679 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000680}
681
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000682LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
683 armnn::IWorkloadFactory& workloadFactory,
684 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
685 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +0000686{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000687 return SimpleConvolution2dAsymmetricPaddingTestCommon<float>(
688 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000689}
690
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000691LayerTestResult<float, 4> DepthwiseConvolution2dTest(
692 armnn::IWorkloadFactory& workloadFactory,
693 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
694 bool biasEnabled,
695 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +0000696{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000697 return DepthwiseConvolution2dTestImpl<float, float>(
698 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000699}
700
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000701LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
702 armnn::IWorkloadFactory& workloadFactory,
703 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
704 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +0100705{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000706 return DepthwiseConvolution2dNhwcTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
Nikhil Rajcec6b652018-10-12 13:51:57 +0100707}
708
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000709LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
710 armnn::IWorkloadFactory& workloadFactory,
711 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
712 bool biasEnabled,
713 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +0000714{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000715 return DepthwiseConvolution2dDepthMul1TestImpl<float, float>(
716 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000717}
718
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000719LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
720 armnn::IWorkloadFactory& workloadFactory,
721 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
722 bool biasEnabled,
723 const armnn::DataLayoutIndexed& layout)
surmeh013537c2c2018-05-18 16:31:43 +0100724{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000725 return DepthwiseConvolution2dAsymmetricTestCommon<float>(
726 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +0100727}
728
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000729LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
730 armnn::IWorkloadFactory& workloadFactory,
731 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
732 bool biasEnabled,
733 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +0000734{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000735 return DepthwiseConvolution2dTestImpl<uint8_t, int32_t>(
736 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000737}
738
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000739LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
740 armnn::IWorkloadFactory& workloadFactory,
741 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
742 bool biasEnabled,
743 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +0000744{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000745 return DepthwiseConvolution2dDepthMul1TestImpl<uint8_t, int32_t>(
746 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000747}
748
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000749LayerTestResult<float, 4> Convolution1dTest(
750 armnn::IWorkloadFactory& workloadFactory,
751 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
752 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000753{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000754 return Convolution1dTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000755}
756
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000757LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
758 armnn::IWorkloadFactory& workloadFactory,
759 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
760 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000761{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000762 return Convolution1dTestImpl<uint8_t>(workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000763}
764
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000765LayerTestResult<float,4> CompareConvolution2dTest(
766 armnn::IWorkloadFactory& workloadFactory,
767 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
768 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +0000769{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000770 return CompareConvolution2dTestImpl<float>(workloadFactory, memoryManager, refWorkloadFactory);
telsoa014fcda012018-03-09 14:13:49 +0000771}
772
// Compares a depthwise 2D convolution on the backend under test against a
// reference factory, for element type T (instantiated below for float/uint8).
template<typename T>
LayerTestResult<T,4> CompareDepthwiseConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::DataLayoutIndexed& layout)
{
    return CompareDepthwiseConvolution2dTestImpl<T>(workloadFactory, memoryManager, refWorkloadFactory, layout);
}
782
// Explicit instantiations of CompareDepthwiseConvolution2dTest for the two
// element types exercised by the test suites (float32 and quantised uint8),
// so the template definition can stay in this translation unit.
template LayerTestResult<float, 4> CompareDepthwiseConvolution2dTest<float>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    armnn::IWorkloadFactory&,
    const armnn::DataLayoutIndexed&);

template LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dTest<uint8_t>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    armnn::IWorkloadFactory&,
    const armnn::DataLayoutIndexed&);
794
795LayerTestResult<float,4> SimpleNormalizationAcrossTest(
796 armnn::IWorkloadFactory& workloadFactory,
797 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000798{
799 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
800 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000801 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000802}
803
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000804LayerTestResult<float,4> SimpleNormalizationWithinTest(
805 armnn::IWorkloadFactory& workloadFactory,
806 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000807{
808 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
809 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000810 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000811}
812
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000813LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
814 armnn::IWorkloadFactory& workloadFactory,
815 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +0100816{
817 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
818 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000819 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +0100820}
821
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000822LayerTestResult<float,2> SimpleSoftmaxTest(
823 armnn::IWorkloadFactory& workloadFactory,
824 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
825 float beta)
telsoa014fcda012018-03-09 14:13:49 +0000826{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000827 return SimpleSoftmaxTestImpl<float>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +0000828}
829
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000830LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
831 armnn::IWorkloadFactory& workloadFactory,
832 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
833 float beta)
telsoa014fcda012018-03-09 14:13:49 +0000834{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000835 return SimpleSoftmaxTestImpl<uint8_t>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +0000836}
837
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000838LayerTestResult<float,4> CompareNormalizationTest(
839 armnn::IWorkloadFactory& workloadFactory,
840 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
841 armnn::IWorkloadFactory& refWorkloadFactory,
842 armnn::NormalizationAlgorithmChannel normChannel,
843 armnn::NormalizationAlgorithmMethod normMethod)
telsoa014fcda012018-03-09 14:13:49 +0000844{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000845 return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000846}
847
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000848LayerTestResult<float,2> CompareSoftmaxTest(
849 armnn::IWorkloadFactory& workloadFactory,
850 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000851 armnn::IWorkloadFactory& refWorkloadFactory,
852 float beta)
853{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000854 return CompareSoftmaxTestImpl<float>(workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +0000855}
856
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000857LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
858 armnn::IWorkloadFactory& workloadFactory,
859 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000860 armnn::IWorkloadFactory& refWorkloadFactory,
861 float beta)
862{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000863 return CompareSoftmaxTestImpl<uint8_t>(workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +0000864}
865
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000866std::vector<LayerTestResult<float,3>> SplitterTest(
867 armnn::IWorkloadFactory& workloadFactory,
868 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000869{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000870 return SplitterTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +0000871}
872
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000873std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
874 armnn::IWorkloadFactory& workloadFactory,
875 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000876{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000877 return SplitterTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000878}
879
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000880LayerTestResult<float, 3> CopyViaSplitterTest(
881 armnn::IWorkloadFactory& workloadFactory,
882 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000883{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000884 return CopyViaSplitterTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000885}
886
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000887LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
888 armnn::IWorkloadFactory& workloadFactory,
889 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000890{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000891 return CopyViaSplitterTestImpl<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000892}
893
telsoa01c577f2c2018-08-31 09:22:23 +0100894LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000895 armnn::IWorkloadFactory& workloadFactory,
896 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +0100897{
898 armnn::TensorInfo inputDesc({ 2, 2 }, armnn::GetDataType<float>());
899 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
900 { 2., 3., 3., 4. }));
901
902 armnn::TensorInfo outputDesc({ 2, 4 }, armnn::GetDataType<float>());
903 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
904 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
905 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000906 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
907 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +0100908}
909
// LSTM layer test: CIFG disabled, peephole enabled, projection layer enabled.
// Fixed 2x5 input batch with a 2x16 golden output (projection widens the
// output); delegates to the implementation in LstmTestImpl.hpp.
// NOTE(review): golden values are precomputed for these exact inputs — do not
// edit without regenerating them.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
             0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
             -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
             -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
             0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
             -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
             0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
             0.02168f}));
    return LstmLayerFloat32NoCifgWithPeepholeWithProjectionTestImpl(
        workloadFactory, memoryManager, input, expectedOutput);
}
931
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000932LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
933 armnn::IWorkloadFactory& workloadFactory,
934 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +0100935{
936 armnn::TensorInfo inputDesc({2, 2}, armnn::GetDataType<float>());
937 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
938 {2., 3., 3., 4.}));
939
940
941 armnn::TensorInfo outputDesc({2, 4}, armnn::GetDataType<float>());
942 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
943 {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
944 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));
945
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000946 return LstmNoCifgNoPeepholeNoProjectionTestImpl(
947 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +0100948}
949
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000950LayerTestResult<float,3> MergerTest(
951 armnn::IWorkloadFactory& workloadFactory,
952 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000953{
surmeh013537c2c2018-05-18 16:31:43 +0100954 unsigned int outputWidth = 3;
telsoa014fcda012018-03-09 14:13:49 +0000955 unsigned int outputHeight = 6;
956 unsigned int outputChannels = 3;
957
surmeh013537c2c2018-05-18 16:31:43 +0100958 unsigned int inputWidth1 = 3;
959 unsigned int inputHeight1 = 6;
960 unsigned int inputChannels1 = 2;
telsoa014fcda012018-03-09 14:13:49 +0000961
surmeh013537c2c2018-05-18 16:31:43 +0100962 unsigned int inputWidth2 = 3;
963 unsigned int inputHeight2 = 6;
964 unsigned int inputChannels2 = 1;
telsoa014fcda012018-03-09 14:13:49 +0000965
telsoa01c577f2c2018-08-31 09:22:23 +0100966 // Define the tensor descriptors.
telsoa014fcda012018-03-09 14:13:49 +0000967 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
968 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
969 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +0000970
971 LayerTestResult<float,3> ret(outputTensorInfo);
972
telsoa014fcda012018-03-09 14:13:49 +0000973 ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
surmeh013537c2c2018-05-18 16:31:43 +0100974 {
975 1.0f, 2.0f, 3.0f,
976 4.0f, 5.0f, 6.0f,
977 7.0f, 8.0f, 9.0f,
978 10.0f, 11.0f, 12.0f,
979 13.0f, 14.0f, 15.0f,
980 16.0f, 17.0f, 18.0f,
telsoa014fcda012018-03-09 14:13:49 +0000981
surmeh013537c2c2018-05-18 16:31:43 +0100982 19.0f, 20.0f, 21.0f,
983 22.0f, 23.0f, 24.0f,
984 25.0f, 26.0f, 27.0f,
985 28.0f, 29.0f, 30.0f,
986 31.0f, 32.0f, 33.0f,
987 34.0f, 35.0f, 36.0f,
telsoa014fcda012018-03-09 14:13:49 +0000988
surmeh013537c2c2018-05-18 16:31:43 +0100989 37.0f, 38.0f, 39.0f,
990 40.0f, 41.0f, 42.0f,
991 43.0f, 44.0f, 45.0f,
992 46.0f, 47.0f, 48.0f,
993 49.0f, 50.0f, 51.0f,
994 52.0f, 53.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +0000995 })
996 );
997
telsoa014fcda012018-03-09 14:13:49 +0000998 auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
999 {
surmeh013537c2c2018-05-18 16:31:43 +01001000 1.0f, 2.0f, 3.0f,
1001 4.0f, 5.0f, 6.0f,
1002 7.0f, 8.0f, 9.0f,
1003 10.0f, 11.0f, 12.0f,
1004 13.0f, 14.0f, 15.0f,
1005 16.0f, 17.0f, 18.0f,
telsoa014fcda012018-03-09 14:13:49 +00001006
surmeh013537c2c2018-05-18 16:31:43 +01001007 19.0f, 20.0f, 21.0f,
1008 22.0f, 23.0f, 24.0f,
1009 25.0f, 26.0f, 27.0f,
1010 28.0f, 29.0f, 30.0f,
1011 31.0f, 32.0f, 33.0f,
1012 34.0f, 35.0f, 36.0f,
telsoa014fcda012018-03-09 14:13:49 +00001013 })
1014 );
1015
1016 auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
1017 {
surmeh013537c2c2018-05-18 16:31:43 +01001018 37.0f, 38.0f, 39.0f,
1019 40.0f, 41.0f, 42.0f,
telsoa014fcda012018-03-09 14:13:49 +00001020 43.0f, 44.0f, 45.0f,
surmeh013537c2c2018-05-18 16:31:43 +01001021 46.0f, 47.0f, 48.0f,
1022 49.0f, 50.0f, 51.0f,
1023 52.0f, 53.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00001024 })
1025 );
1026
telsoa01c577f2c2018-08-31 09:22:23 +01001027 std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
telsoa014fcda012018-03-09 14:13:49 +00001028 armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);
1029
telsoa01c577f2c2018-08-31 09:22:23 +01001030 std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
telsoa014fcda012018-03-09 14:13:49 +00001031 armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
1032
telsoa014fcda012018-03-09 14:13:49 +00001033 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1034
1035 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
1036
1037 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
1038 subTensorsSupported ?
1039 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
1040 workloadFactory.CreateTensorHandle(inputTensorInfo1);
1041
1042 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
1043 subTensorsSupported ?
1044 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
1045 workloadFactory.CreateTensorHandle(inputTensorInfo2);
1046
telsoa014fcda012018-03-09 14:13:49 +00001047 armnn::MergerQueueDescriptor data;
1048 armnn::WorkloadInfo info;
1049 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1050 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
telsoa014fcda012018-03-09 14:13:49 +00001051 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1052
1053 data.m_ViewOrigins.push_back(window1);
1054 data.m_ViewOrigins.push_back(window2);
telsoa014fcda012018-03-09 14:13:49 +00001055
1056 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);
1057
1058 inputHandle1->Allocate();
1059 inputHandle2->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00001060 outputHandle->Allocate();
1061
1062 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
1063 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00001064
1065 workload->Execute();
1066
1067 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
1068
1069 return ret;
1070}
1071
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001072LayerTestResult<float,4> AdditionTest(
1073 armnn::IWorkloadFactory& workloadFactory,
1074 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001075{
1076 unsigned int batchSize = 2;
1077 unsigned int channels = 2;
1078 unsigned int height = 2;
1079 unsigned int width = 3;
1080
1081 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
1082 armnn::TensorInfo outputTensorInfo;
1083
1084 unsigned int shape[] = {batchSize, channels, height, width};
1085
1086 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1087 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1088 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1089
1090
1091 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
1092 {
1093 0.0f, 2.0f, 1.0f,
1094 0.2f, 1.0f, 2.0f,
1095
1096 1.0f, 2.0f, 1.0f,
1097 0.2f, 1.0f, 2.0f,
1098
1099 0.0f, 2.0f, 1.0f,
1100 4.2f, 1.0f, 2.0f,
1101
1102 0.0f, 0.0f, 1.0f,
1103 0.2f, 1.0f, 2.0f,
1104 }));
1105
1106 auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
1107 {
1108 1.0f, 2.0f, 1.0f,
1109 0.0f, 1.0f, 2.0f,
1110
1111 1.0f, 2.0f, -2.0f,
1112 0.2f, 1.0f, 2.0f,
1113
1114 0.0f, 2.0f, 1.0f,
1115 4.2f, 0.0f, -3.0f,
1116
1117 0.0f, 0.0f, 1.0f,
1118 0.7f, 1.0f, 5.0f,
1119 }));
1120
1121 LayerTestResult<float,4> ret(outputTensorInfo);
1122 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
1123 {
1124 1.0f, 4.0f, 2.0f,
1125 0.2f, 2.0f, 4.0f,
1126
1127 2.0f, 4.0f, -1.0f,
1128 0.4f, 2.0f, 4.0f,
1129
1130 0.0f, 4.0f, 2.0f,
1131 8.4f, 1.0f, -1.0f,
1132
1133 0.0f, 0.0f, 2.0f,
1134 0.9f, 2.0f, 7.0f,
1135 }));
1136
1137 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1138 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
1139 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1140
1141 armnn::AdditionQueueDescriptor data;
1142 armnn::WorkloadInfo info;
1143 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1144 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
1145 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1146
1147 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
1148
1149 inputHandle1->Allocate();
1150 inputHandle2->Allocate();
1151 outputHandle->Allocate();
1152
1153 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1154 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
1155
1156 workload->Execute();
1157
1158 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1159
1160 return ret;
1161}
1162
// Addition with broadcasting: a {1,3,2,1} tensor is added to a {1,1,2,3}
// tensor, broadcasting both to the {1,3,2,3} output shape. qScale/qOffset are
// applied to all tensor infos when T is a quantised type.
// NOTE(review): memoryManager is part of the common test signature but is
// not used directly in this function.
template <typename T>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    // Quantisation parameters only apply to quantised element types.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    // Expected output: each input1 element broadcast-added to each input2 row.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Allocate before copying in, execute, then read the result back.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1240
// Addition broadcasting a single element: a {1,3,2,3} tensor plus a {1,1,1,1}
// scalar-shaped tensor (value 0.5), broadcast over every element. qScale and
// qOffset are applied to all tensor infos when T is a quantised type.
// NOTE(review): memoryManager is part of the common test signature but is
// not used directly in this function.
template <typename T>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    // Quantisation parameters only apply to quantised element types.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
             0.0f,  1.0f,  2.0f,
             3.0f,  4.0f,  5.0f,
             6.0f,  7.0f,  8.0f,
             9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    // Expected output: every input1 element shifted by the broadcast 0.5.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
             0.5f,  1.5f,  2.5f,
             3.5f,  4.5f,  5.5f,
             6.5f,  7.5f,  8.5f,
             9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Allocate before copying in, execute, then read the result back.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1313
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001314LayerTestResult<float, 4> AdditionBroadcastTest(
1315 armnn::IWorkloadFactory& workloadFactory,
1316 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001317{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001318 return AdditionBroadcastTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001319}
1320
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001321LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
1322 armnn::IWorkloadFactory& workloadFactory,
1323 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001324{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001325 return AdditionBroadcastTestImpl<uint8_t>(workloadFactory, memoryManager, 2.f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001326}
1327
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001328LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
1329 armnn::IWorkloadFactory& workloadFactory,
1330 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001331{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001332 return AdditionBroadcast1ElementTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001333}
1334
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001335LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
1336 armnn::IWorkloadFactory& workloadFactory,
1337 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001338{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001339 return AdditionBroadcast1ElementTestImpl<uint8_t>(workloadFactory, memoryManager, 0.1333333f, 128);
telsoa014fcda012018-03-09 14:13:49 +00001340}
1341
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001342LayerTestResult<float,4> CompareAdditionTest(
1343 armnn::IWorkloadFactory& workloadFactory,
1344 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1345 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00001346{
1347 unsigned int batchSize = 4;
1348 unsigned int channels = 1;
1349 unsigned int height = 2;
1350 unsigned int width = 3;
1351
1352 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
1353 armnn::TensorInfo outputTensorInfo;
1354
1355 unsigned int shape[] = {batchSize, channels, height, width};
1356
1357 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1358 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1359 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1360
1361 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
1362 auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
1363
1364 LayerTestResult<float,4> ret(outputTensorInfo);
1365
1366 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1367 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
1368 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1369
1370 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
1371 std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
1372 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1373
1374 armnn::AdditionQueueDescriptor data;
1375 armnn::WorkloadInfo info;
1376 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1377 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
1378 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1379
1380 armnn::AdditionQueueDescriptor refData = data;
1381 armnn::WorkloadInfo refInfo = info;
1382 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
1383 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
1384 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1385
1386 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
1387 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
1388
1389 inputHandle1->Allocate();
1390 inputHandle2->Allocate();
1391 outputHandle->Allocate();
1392 inputHandle1Ref->Allocate();
1393 inputHandle2Ref->Allocate();
1394 outputHandleRef->Allocate();
1395
1396 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1397 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
1398 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
1399 CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
1400
1401 workload->Execute();
1402 workloadRef->Execute();
1403
1404 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1405 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
1406
1407 return ret;
1408}
1409
namespace {

// Runs a Division workload on the backend under test and returns the computed
// output together with the caller-supplied expected values (the framework
// compares LayerTestResult::output against outputExpected).
//
// T selects the tensor data type: uint8_t maps to QuantisedAsymm8, any other
// type to Float32. The scale/offset arguments are the quantization parameters
// for each tensor (set unconditionally; ignored for Float32 tensors).
// memoryManager is accepted for interface uniformity and is not used here.
template <typename T>
LayerTestResult<T, 4> DivisionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    // Derive the armnn data type from the template parameter.
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    // The expected output comes straight from the caller; only 'output' is
    // produced by executing the workload below.
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Inputs are registered in order: index 0 is the dividend, index 1 the divisor.
    armnn::DivisionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
1477
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001478LayerTestResult<float,4> DivisionByZeroTest(
1479 armnn::IWorkloadFactory& workloadFactory,
1480 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001481{
1482 const unsigned int width = 2;
1483 const unsigned int height = 2;
1484 const unsigned int channelCount = 2;
1485 const unsigned int batchSize = 2;
1486
1487 unsigned int shape[] = { batchSize, channelCount, height, width };
1488
1489 std::vector<float> input0({
1490 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
1491 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
1492
1493 std::vector<float> input1({
1494 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
1495 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
1496
1497 std::vector<float> output({
1498 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
1499 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
1500
David Beck5cd01f32018-09-12 16:00:08 +01001501 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001502 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001503 shape, input0, 1.0f, 0,
1504 shape, input1, 1.0f, 0,
1505 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001506}
1507
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001508LayerTestResult<float,4> DivisionTest(
1509 armnn::IWorkloadFactory& workloadFactory,
1510 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001511{
1512 const unsigned int width = 2;
1513 const unsigned int height = 2;
1514 const unsigned int channelCount = 2;
1515 const unsigned int batchSize = 2;
1516
1517 unsigned int shape[] = { batchSize, channelCount, height, width };
1518
1519 std::vector<float> input0({
1520 2, 2, 2, 2, 3, 3, 3, 3,
1521 4, 4, 4, 4, 5, 5, 5, 5 });
1522
1523 std::vector<float> input1({
1524 1, 1, 1, 1, 2, 2, 2, 2,
1525 4, 4, 4, 4, 4, 4, 4, 4 });
1526
1527 std::vector<float> output({
1528 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
1529 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
1530
David Beck5cd01f32018-09-12 16:00:08 +01001531
1532 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001533 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001534 shape, input0, 1.0f, 0,
1535 shape, input1, 1.0f, 0,
1536 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001537}
1538
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001539LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
1540 armnn::IWorkloadFactory& workloadFactory,
1541 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001542{
1543 unsigned int shape0[] = { 1, 2, 2, 2 };
1544 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1545
1546 unsigned int shape1[] = { 1, 1, 1, 1 };
1547 std::vector<float> input1({ 2 });
1548
1549 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1550
David Beck5cd01f32018-09-12 16:00:08 +01001551
1552 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001553 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001554 shape0, input0, 1.0f, 0,
1555 shape1, input1, 1.0f, 0,
1556 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001557}
1558
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001559LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
1560 armnn::IWorkloadFactory& workloadFactory,
1561 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001562{
1563 unsigned int shape0[] = { 1, 3, 3, 2 };
1564 std::vector<float> input0({
1565 1, 4, 3, 8, 5, 12,
1566 7, 16, 9, 20, 11, 24,
1567 13, 28, 15, 32, 17, 36});
1568
1569 unsigned int shape1[] = { 1, 1, 1, 2 };
1570 std::vector<float> input1({ 1, 2 });
1571
1572 std::vector<float> output({
1573 1, 2, 3, 4, 5, 6,
1574 7, 8, 9, 10, 11, 12,
1575 13, 14, 15, 16, 17, 18});
1576
David Beck5cd01f32018-09-12 16:00:08 +01001577 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001578 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001579 shape0, input0, 1.0f, 0,
1580 shape1, input1, 1.0f, 0,
1581 shape0, output, 1.0f, 0);
1582}
1583
1584
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001585LayerTestResult<uint8_t,4> DivisionUint8Test(
1586 armnn::IWorkloadFactory& workloadFactory,
1587 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001588{
1589 const unsigned int width = 2;
1590 const unsigned int height = 2;
1591 const unsigned int channelCount = 2;
1592 const unsigned int batchSize = 2;
1593
1594 unsigned int shape[] = { batchSize, channelCount, height, width };
1595
1596 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
1597 4, 4, 4, 4, 5, 5, 5, 5 });
1598
1599 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
1600 4, 4, 4, 4, 4, 4, 4, 4 });
1601
1602 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
1603 4, 4, 4, 4, 5, 5, 5, 5});
1604
1605
1606 return DivisionTestHelper<uint8_t>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001607 memoryManager,
1608 shape, input0, 1.0f, 0,
1609 shape, input1, 1.0f, 0,
1610 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001611}
1612
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001613LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
1614 armnn::IWorkloadFactory& workloadFactory,
1615 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001616{
1617 unsigned int shape0[] = { 1, 2, 2, 2 };
1618 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1619
1620 unsigned int shape1[] = { 1, 1, 1, 1 };
1621 std::vector<uint8_t> input1({ 2 });
1622
1623 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1624
1625 return DivisionTestHelper<uint8_t>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001626 memoryManager,
1627 shape0, input0, 1.0f, 0,
1628 shape1, input1, 1.0f, 0,
1629 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001630}
1631
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001632LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
1633 armnn::IWorkloadFactory& workloadFactory,
1634 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001635{
1636 unsigned int shape0[] = { 1, 3, 3, 2 };
1637 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
1638 7, 16, 9, 20, 11, 24,
1639 13, 28, 15, 32, 17, 36});
1640
1641 unsigned int shape1[] = { 1, 1, 1, 2 };
1642 std::vector<uint8_t> input1({ 1, 2 });
1643
1644 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
1645 7, 8, 9, 10, 11, 12,
1646 13, 14, 15, 16, 17, 18});
1647
1648 return DivisionTestHelper<uint8_t>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001649 memoryManager,
1650 shape0, input0, 1.0f, 0,
1651 shape1, input1, 1.0f, 0,
1652 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001653}
1654
namespace {

// Runs a float Multiplication workload with the given inputs and returns the
// computed output together with the caller-supplied expected values.
// shape0 and shape1 may differ (the broadcast test cases rely on this).
// memoryManager is accepted for interface uniformity and is not used here.
LayerTestResult<float,4> MultiplicationTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<float> & values0,
    const unsigned int shape1[4],
    const std::vector<float> & values1,
    const unsigned int outShape[4],
    const std::vector<float> & outValues)
{
    const size_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};

    auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Inputs are registered in order: index 0 first, index 1 second.
    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    // The expected output comes straight from the caller.
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
    return ret;
}
} // anonymous namespace
1703
1704
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001705LayerTestResult<float,4> MultiplicationTest(
1706 armnn::IWorkloadFactory& workloadFactory,
1707 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01001708{
1709 const unsigned int width = 2;
1710 const unsigned int height = 2;
1711 const unsigned int channelCount = 2;
1712 const unsigned int batchSize = 2;
1713
1714 unsigned int shape[] = { batchSize, channelCount, height, width };
1715
1716 std::vector<float> input0({
1717 1, 1, 1, 1, 2, 2, 2, 2,
1718 3, 3, 3, 3, 4, 4, 4, 4 });
1719
1720 std::vector<float> input1({
1721 2, 2, 2, 2, 3, 3, 3, 3,
1722 4, 4, 4, 4, 5, 5, 5, 5 });
1723
1724 std::vector<float> output({
1725 2, 2, 2, 2, 6, 6, 6, 6,
1726 12, 12, 12, 12, 20, 20, 20, 20 });
1727
1728 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001729 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01001730 shape,
1731 input0,
1732 shape,
1733 input1,
1734 shape,
1735 output);
1736}
1737
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001738LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
1739 armnn::IWorkloadFactory& workloadFactory,
1740 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01001741{
1742 unsigned int shape0[] = { 1, 2, 2, 2 };
1743 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
1744
1745 unsigned int shape1[] = { 1, 1, 1, 1 };
1746 std::vector<float> input1({ 2 });
1747
1748 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
1749
1750 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001751 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01001752 shape0,
1753 input0,
1754 shape1,
1755 input1,
1756 shape0,
1757 output);
1758}
1759
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001760LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
1761 armnn::IWorkloadFactory& workloadFactory,
1762 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01001763{
1764 unsigned int shape0[] = { 1, 3, 3, 2 };
1765 std::vector<float> input0({
1766 1, 2, 3, 4, 5, 6,
1767 7, 8, 9, 10, 11, 12,
1768 13, 14, 15, 16, 17, 18});
1769
1770 unsigned int shape1[] = { 1, 1, 1, 2 };
1771 std::vector<float> input1({ 1, 2 });
1772
1773 std::vector<float> output({
1774 1, 4, 3, 8, 5, 12,
1775 7, 16, 9, 20, 11, 24,
1776 13, 28, 15, 32, 17, 36});
1777
1778 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001779 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01001780 shape0,
1781 input0,
1782 shape1,
1783 input1,
1784 shape0,
1785 output);
1786}
telsoa014fcda012018-03-09 14:13:49 +00001787
// Runs the same random-input Multiplication workload on both the backend under
// test (workloadFactory) and a reference backend (refWorkloadFactory), and
// returns both outputs so the framework can compare them for equality.
// memoryManager is accepted for interface uniformity and is not used here.
LayerTestResult<float,4> CompareMultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    // Fixed seeds keep the comparison deterministic across runs.
    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The reference descriptor is a copy of the first, re-pointed at the
    // reference backend's tensor handles.
    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive identical input data.
    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    // 'output' holds the backend-under-test result, 'outputExpected' the
    // reference backend's result.
    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
1856
// Runs the same BatchNormalization workload (random input, random
// mean/variance/beta/gamma parameters) on both the backend under test and a
// reference backend, returning both outputs for comparison by the framework.
// memoryManager is accepted for interface uniformity and is not used here.
LayerTestResult<float,4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    // Per-channel parameter tensors are 1D of length 'channels'.
    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    // Fixed seeds keep the test deterministic. Variance gets an extra 0.0f
    // argument (presumably a lower bound -- see MakeRandomTensor).
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    // The reference descriptor shares the parameter tensors but is re-pointed
    // at the reference backend's input/output handles.
    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive identical input data.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    // 'output' holds the backend-under-test result, 'outputExpected' the
    // reference backend's result.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
1937
// Runs a Permute workload on the given backend that rearranges inputData
// according to 'mappings'. On return, outputData holds the permuted elements
// and inputTensorInfo is overwritten with the permuted tensor info so callers
// can chain further operations on the result.
// memoryManager is accepted for interface uniformity and is not used here.
template<typename T>
void PermuteTensorData(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::PermutationVector& mappings,
    armnn::TensorInfo & inputTensorInfo,
    const T * inputData,
    std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());

    // Expose the permuted shape to the caller via the in/out parameter.
    inputTensorInfo = outputTensorInfo;
}
1980
1981armnn::OriginsDescriptor CreateMergerDescriptorForConcatenation(
1982 const std::vector<armnn::TensorInfo> & inputTensorInfos,
1983 unsigned int concatDim)
1984{
telsoa014fcda012018-03-09 14:13:49 +00001985 std::vector<armnn::TensorShape> shapes;
1986 shapes.reserve(inputTensorInfos.size());
1987 for (const armnn::TensorInfo& it: inputTensorInfos)
1988 {
1989 shapes.push_back(it.GetShape());
1990 }
surmeh013537c2c2018-05-18 16:31:43 +01001991
1992 return armnn::CreateMergerDescriptorForConcatenation(shapes.begin(),
1993 shapes.end(),
1994 concatDim);
1995}
1996
1997//
1998// Concatenation is only supported for N and C dimensions for NCHW. In case of
telsoa01c577f2c2018-08-31 09:22:23 +01001999// <4 dimensions we need to make sure that the concat dimensions are at least
surmeh013537c2c2018-05-18 16:31:43 +01002000// the 3rd slowest iterating one.
2001//
2002
2003bool NeedPermuteForConcat(
2004 const std::vector<armnn::TensorInfo> & inputTensorInfos,
2005 unsigned int concatDim)
2006{
2007 // See note above. Additionally we expect the input shapes to have the
2008 // same number of dimensions.
2009 unsigned int nDimensions = 0;
2010
telsoa01c577f2c2018-08-31 09:22:23 +01002011 // Determine the number of dimensions as well as sanity check them
2012 // agains test implementation issues.
surmeh013537c2c2018-05-18 16:31:43 +01002013 for (auto && tensorInfo : inputTensorInfos)
2014 {
2015 if (!nDimensions)
2016 {
2017 nDimensions = tensorInfo.GetShape().GetNumDimensions();
2018 }
2019 else
2020 {
2021 BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
2022 "Input shapes must have the same number of dimensions");
2023 }
2024 }
2025
2026 return (nDimensions-concatDim) < 3;
2027}
2028
2029armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
2030{
2031 unsigned int numDims = inputShape.GetNumDimensions();
2032 if (numDims >= 3)
2033 {
2034 // Nothing to do if the inputShape has at least 3 dimensions.
2035 return inputShape;
2036 }
2037
2038 std::vector<unsigned int> newDims(size_t(3), 1u);
2039 unsigned int expandedBy = 3 - numDims;
2040 for (unsigned int i=0; i<numDims; ++i)
2041 {
2042 newDims[expandedBy+i] = inputShape[i];
2043 }
2044 return armnn::TensorShape(3u, &newDims[0]);
2045}
2046
2047void Generate3dPermuteVectorForConcat(
2048 unsigned int numDimensions,
2049 unsigned int & concatDim,
2050 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
2051{
2052 BOOST_ASSERT_MSG(numDimensions <= 3,
2053 "Only dimensions 1,2 and 3 are supported by this helper");
2054
2055 unsigned int expandedBy = 3 - numDimensions;
2056 unsigned int expandedConcatAxis = concatDim + expandedBy;
2057
2058 if (expandedConcatAxis == 2)
2059 {
2060 concatDim = 0;
2061 armnn::PermutationVector forwardPermutation({1, 2, 0});
2062 armnn::PermutationVector reversePermutation({2, 0, 1});
2063 permutations = std::make_pair(forwardPermutation, reversePermutation);
2064 }
2065 else if (expandedConcatAxis == 1)
2066 {
2067 concatDim = 0;
2068 armnn::PermutationVector forwardPermutation({2, 0, 1});
2069 armnn::PermutationVector reversePermutation({1, 2, 0});
2070 permutations = std::make_pair(forwardPermutation, reversePermutation);
2071 }
2072 else
2073 {
2074 BOOST_ASSERT(expandedConcatAxis == 0);
2075 concatDim = 0;
2076 }
2077}
2078
//
// Permute the input tensors so we can do a supported concatenation.
// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
// at the front. Finally this function tells what the output shape
// of the permuted concatenated tensor is going to be.
//
// On return: inputTensorInfos/inputData describe the permuted inputs
// (inputDataStorage owns the permuted element buffers), permuteVector holds
// the reverse permutation for undoing the transform on the output, concatDim
// is rewritten to the post-permute concat axis, and outputTensorInfo's shape
// is replaced with the permuted (3D-expanded) output shape.
template <typename T>
void PermuteInputsForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::vector<armnn::TensorInfo> & inputTensorInfos,
    std::vector<T *> & inputData,
    std::vector<std::vector<T>> & inputDataStorage,
    armnn::PermutationVector & permuteVector,
    unsigned int & concatDim,
    armnn::TensorInfo & outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            // First input fixes the rank and the permutation pair for all inputs.
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);
            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        // Expand to 3D, then permute the data into this input's storage slot.
        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        // Re-point the caller's views at the permuted data and info.
        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
2146
2147
//
// This is the pair of PermuteInputsForConcat(...) which permutes back
// the output of the concatenation so we can check it against an expected
// output.
//
// Reads the concatenated result out of inputDataHandle, applies
// permuteVector to it, and writes the un-permuted elements into the
// caller-provided 'data' buffer (which must hold tensorInfo.GetNumElements()
// elements).
template <typename T>
void PermuteOutputForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo & tensorInfo,
    const armnn::PermutationVector & permuteVector,
    std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
    T * data)
{
    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
    if (data == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo resultTensorInfo = tensorInfo;
    std::vector<T> inputData(tensorInfo.GetNumElements());
    std::vector<T> outputData;

    // Pull the concatenated result out of the backend's tensor handle.
    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());

    PermuteTensorData<T>(workloadFactory,
                         memoryManager,
                         permuteVector,
                         resultTensorInfo,
                         &inputData[0],
                         outputData);

    // Copy the permuted result into the caller's buffer.
    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
}
2186
//
// Test harness for concatenation: builds and runs a Merger workload that
// concatenates the given inputs along concatDim, writing the result into
// 'output'. If the backend cannot concatenate along the requested axis
// directly, the inputs are permuted first and the output permuted back
// (see PermuteInputsForConcat / PermuteOutputForConcat).
//
template <typename T>
void Concatenate(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
    std::initializer_list<T *> inputsOrig,
    const armnn::TensorInfo& outputTensorInfoOrig,
    T * output,
    unsigned int concatDim)
{
    BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
    if (output == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::MergerQueueDescriptor queueDescriptor;

    // Saves a copy of the parameters which we might need to change
    // (the permutation step below rewrites infos, pointers and shape).
    std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
    std::vector<T *> inputs = inputsOrig;
    armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;

    // Identity by default; replaced with a real permutation if needed.
    armnn::PermutationVector permuteVector{0, 1, 2};

    // Holds and automatically releases memory for the reshaped input data.
    std::vector<std::vector<T>> tmpInputDataStorage;

    const size_t inputCount = inputTensorInfos.size();

    bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);

    if (needPermuteForConcat)
    {
        //
        // We need to permute the inputs, because concatenation along
        // the requested axis is not supported.
        //
        PermuteInputsForConcat<T>(workloadFactory,
                                  memoryManager,
                                  inputTensorInfos,
                                  inputs,
                                  tmpInputDataStorage,
                                  permuteVector,
                                  concatDim,
                                  outputTensorInfo);
    }

    armnn::OriginsDescriptor viewsDescriptor = CreateMergerDescriptorForConcatenation(inputTensorInfos, concatDim);

    // Copy each view's origin (its offset within the output) into the queue descriptor.
    queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
    for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
    {
        queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
            viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
    }

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
    inputHandles.reserve(inputCount);

    // Prefer sub-tensor views into the output where the backend supports them;
    // otherwise fall back to separate input tensor handles.
    const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
    for (unsigned int i = 0; i < inputCount; ++i)
    {
        const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];

        std::unique_ptr<armnn::ITensorHandle> inputHandle = subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo.GetShape(),
                queueDescriptor.m_ViewOrigins[i].m_Origin.data())
            : workloadFactory.CreateTensorHandle(inputTensorInfo);

        inputHandles.emplace_back(std::move(inputHandle));
    }

    armnn::WorkloadInfo workloadInfo;

    for (unsigned int i = 0; i < inputCount; ++i)
    {
        AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
    }

    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(queueDescriptor, workloadInfo);

    // Allocate all handles before uploading any data.
    for (auto& inputHandle : inputHandles)
    {
        inputHandle->Allocate();
    }

    outputHandle->Allocate();

    unsigned int nextInputId = 0;
    for (auto& inputHandle : inputHandles)
    {
        CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
        ++nextInputId;
    }

    workload->Execute();

    if (needPermuteForConcat)
    {
        // Permute the result back to the caller's original layout.
        PermuteOutputForConcat<T>(workloadFactory,
                                  memoryManager,
                                  outputTensorInfo,
                                  permuteVector,
                                  std::move(outputHandle),
                                  output);
    }
    else
    {
        CopyDataFromITensorHandle(output, outputHandle.get());
    }
}
2306
2307template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002308LayerTestResult<T, 1> Concatenation1dTestImpl(
2309 armnn::IWorkloadFactory& workloadFactory,
2310 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2311 float qScale,
2312 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00002313{
2314 armnn::TensorInfo inputTensorInfo({ 3 }, armnn::GetDataType<T>());
2315
2316 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
2317 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
2318 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
2319
2320 armnn::TensorInfo outputTensorInfo({ 9 }, armnn::GetDataType<T>());
2321
2322 LayerTestResult<T, 1> result(outputTensorInfo);
2323
2324 std::vector<T> output;
2325 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002326 Concatenate<T>(workloadFactory, memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00002327 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2328 { input0.data(), input1.data(), input2.data() },
2329 outputTensorInfo,
2330 output.data(),
2331 0);
2332
2333 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
2334 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2335 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
2336 }));
2337
2338 return result;
2339}
2340
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002341LayerTestResult<float, 1> Concatenation1dTest(
2342 armnn::IWorkloadFactory& workloadFactory,
2343 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002344{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002345 return Concatenation1dTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002346}
2347
2348template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002349LayerTestResult<T, 2> Concatenation2dTestImpl(
2350 armnn::IWorkloadFactory& workloadFactory,
2351 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00002352 const armnn::TensorInfo& outputTensorInfo,
2353 unsigned int dimension,
2354 const float qScale,
2355 const int32_t qOffset)
2356{
2357 armnn::TensorInfo inputTensorInfo({ 2, 3 }, armnn::GetDataType<T>());
2358
2359 auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2360 // Batch 0
2361 1.0f, 2.0f, 3.0f,
2362
2363 // Batch 1
2364 10.0f, 11.0f, 12.0f,
2365 }));
2366
2367 auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2368 // Batch 0
2369 4.0f, 5.0f, 6.0f,
2370
2371 // Batch 1
2372 13.0f, 14.0f, 15.0f,
2373 }));
2374
2375 auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2376 // Batch 0
2377 7.0f, 8.0f, 9.0f,
2378
2379 // Batch 1
2380 16.0f, 17.0f, 18.0f,
2381 }));
2382
2383 LayerTestResult<T, 2> result(outputTensorInfo);
2384
2385 std::vector<T> output;
2386 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002387 Concatenate<T>(workloadFactory, memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00002388 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2389 { input0.data(), input1.data(), input2.data() },
2390 outputTensorInfo,
2391 output.data(),
2392 dimension);
2393
2394 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
2395 return result;
2396}
2397
2398template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002399LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
2400 armnn::IWorkloadFactory& workloadFactory,
2401 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2402 float qScale,
2403 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00002404{
2405 armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
2406
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002407 LayerTestResult<T, 2> result =
2408 Concatenation2dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00002409 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2410 // Batch 0
2411 1.0f, 2.0f, 3.0f,
2412
2413 // Batch 1
2414 10.0f, 11.0f, 12.0f,
2415
2416 // Batch 2
2417 4.0f, 5.0f, 6.0f,
2418
2419 // Batch 3
2420 13.0f, 14.0f, 15.0f,
2421
2422 // Batch 4
2423 7.0f, 8.0f, 9.0f,
2424
2425 // Batch 5
2426 16.0f, 17.0f, 18.0f,
2427 }));
2428
2429 return result;
2430}
2431
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002432LayerTestResult<float, 2> Concatenation2dDim0Test(
2433 armnn::IWorkloadFactory& workloadFactory,
2434 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002435{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002436 return Concatenation2dDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002437}
2438
2439template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002440LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
2441 armnn::IWorkloadFactory& workloadFactory,
2442 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2443 float qScale,
2444 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00002445{
2446 armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
2447
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002448 LayerTestResult<T, 2> result =
2449 Concatenation2dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00002450 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2451 // Batch 0
2452 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
2453
2454 // Batch 1
2455 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
2456 }));
2457
2458 return result;
2459}
2460
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002461LayerTestResult<float, 2> Concatenation2dDim1Test(
2462 armnn::IWorkloadFactory& workloadFactory,
2463 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002464{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002465 return Concatenation2dDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002466}
2467
// Concatenation along dimension 0 of inputs with differing batch counts
// (2, 3 and 1 batches of width 3), producing a 6x3 output.
template <typename T>
LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 3, 3 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,

        // Batch 2
        7.0f, 8.0f, 9.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 1, 3 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        16.0f, 17.0f, 18.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
2537
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002538LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
2539 armnn::IWorkloadFactory& workloadFactory,
2540 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002541{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002542 return Concatenation2dDim0DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002543}
2544
2545template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002546LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
2547 armnn::IWorkloadFactory& workloadFactory,
2548 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2549 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00002550 int32_t qOffset)
2551{
2552 armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
2553 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2554 // Batch 0
2555 1.0f, 2.0f, 3.0f,
2556
2557 // Batch 1
2558 10.0f, 11.0f, 12.0f,
2559 }));
2560
2561 armnn::TensorInfo input1TensorInfo({ 2, 5 }, armnn::GetDataType<T>());
2562 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2563 // Batch 0
2564 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
2565
2566 // Batch 1
2567 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
2568 }));
2569
2570 armnn::TensorInfo input2TensorInfo({ 2, 1 }, armnn::GetDataType<T>());
2571 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2572 // Batch 0
2573 9.0f,
2574
2575 // Batch 1
2576 18.0f
2577 }));
2578
2579 armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
2580 LayerTestResult<T, 2> result(outputTensorInfo);
2581
2582 std::vector<T> output;
2583 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002584 Concatenate<T>(workloadFactory, memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00002585 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2586 { input0.data(), input1.data(), input2.data() },
2587 outputTensorInfo,
2588 output.data(),
2589 1);
2590
2591 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
2592 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2593 // Batch 0
2594 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
2595
2596 // Batch 1
2597 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
2598 }));
2599
2600 return result;
2601}
2602
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002603LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
2604 armnn::IWorkloadFactory& workloadFactory,
2605 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002606{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002607 return Concatenation2dDim1DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002608}
2609
// Shared driver for the 3D concatenation tests: concatenates three fixed
// 2x3x2 inputs along 'dimension'. The caller supplies the matching output
// shape and fills in the expected values afterwards.
template <typename T>
LayerTestResult<T, 3> Concatenation3dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 1, Channel 0
        25.0f, 26.0f,

        // Batch 1, Channel 1
        27.0f, 28.0f,

        // Batch 1, Channel 2
        29.0f, 30.0f
    }));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f
    }));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}
2695
// 3D concatenation along dimension 0: three 2x3x2 inputs stacked batch-wise
// into a 6x3x2 output (input0's batches, then input1's, then input2's).
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result =
        Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));
    return result;
}
2764
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002765LayerTestResult<float, 3> Concatenation3dDim0Test(
2766 armnn::IWorkloadFactory& workloadFactory,
2767 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002768{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002769 return Concatenation3dDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002770}
2771
// 3D concatenation along dimension 1: channels of the three 2x3x2 inputs
// are appended within each batch, giving a 2x9x2 output.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result =
        Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        13.0f, 14.0f,

        // Batch 0, Channel 7
        15.0f, 16.0f,

        // Batch 0, Channel 8
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 4
        27.0f, 28.0f,

        // Batch 1, Channel 5
        29.0f, 30.0f,

        // Batch 1, Channel 6
        31.0f, 32.0f,

        // Batch 1, Channel 7
        33.0f, 34.0f,

        // Batch 1, Channel 8
        35.0f, 36.0f
    }));

    return result;
}
2841
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002842LayerTestResult<float, 3> Concatenation3dDim1Test(
2843 armnn::IWorkloadFactory& workloadFactory,
2844 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002845{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002846 return Concatenation3dDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002847}
2848
// 3D concatenation along dimension 2 (the innermost axis): each channel of
// the 2x3x6 output holds the corresponding pairs from the three inputs
// laid end to end.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result =
        Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 2, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
    }));

    return result;
}
2882
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002883LayerTestResult<float, 3> Concatenation3dDim2Test(
2884 armnn::IWorkloadFactory& workloadFactory,
2885 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002886{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002887 return Concatenation3dDim2TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002888}
2889
// 3D concatenation along dimension 0 of inputs with differing batch counts
// (2, 1 and 3 batches of 3x2), producing a 6x3x2 output.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        25.0f, 26.0f,

        // Batch 0, Channel 1
        27.0f, 28.0f,

        // Batch 0, Channel 2
        29.0f, 30.0f,

        // Batch 1, Channel 0
        13.0f, 14.0f,

        // Batch 1, Channel 1
        15.0f, 16.0f,

        // Batch 1, Channel 2
        17.0f, 18.0f,

        // Batch 2, Channel 0
        31.0f, 32.0f,

        // Batch 2, Channel 1
        33.0f, 34.0f,

        // Batch 2, Channel 2
        35.0f, 36.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}
3031
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003032LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
3033 armnn::IWorkloadFactory& workloadFactory,
3034 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003035{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003036 return Concatenation3dDim0DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003037}
3038
// 3D concatenation along dimension 1 of inputs with differing channel
// counts (3, 4 and 1 channels per batch), producing a 2x8x2 output.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 0, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 0
        27.0f, 28.0f,

        // Batch 1, Channel 1
        29.0f, 30.0f,

        // Batch 1, Channel 2
        13.0f, 14.0f,

        // Batch 1, Channel 3
        15.0f, 16.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        25.0f, 26.0f,

        // Batch 0, Channel 7
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        27.0f, 28.0f,

        // Batch 1, Channel 4
        29.0f, 30.0f,

        // Batch 1, Channel 5
        13.0f, 14.0f,

        // Batch 1, Channel 6
        15.0f, 16.0f,

        // Batch 1, Channel 7
        31.0f, 32.0f,
    }));

    return result;
}
3168
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003169LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
3170 armnn::IWorkloadFactory& workloadFactory,
3171 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003172{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003173 return Concatenation3dDim1DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003174}
3175
3176template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003177LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
3178 armnn::IWorkloadFactory& workloadFactory,
3179 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3180 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00003181 int32_t qOffset)
3182{
3183 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
3184 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3185 // Batch 0, Channel 0
3186 1.0f, 2.0f,
3187
3188 // Batch 0, Channel 1
3189 3.0f, 4.0f,
3190
3191 // Batch 0, Channel 2
3192 5.0f, 6.0f,
3193
3194 // Batch 1, Channel 0
3195 19.0f, 20.0f,
3196
3197 // Batch 1, Channel 1
3198 21.0f, 22.0f,
3199
3200 // Batch 1, Channel 2
3201 23.0f, 24.0f
3202 }));
3203
3204 armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, armnn::GetDataType<T>());
3205 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3206 // Batch 0, Channel 0
3207 7.0f,
3208
3209 // Batch 0, Channel 1
3210 9.0f,
3211
3212 // Batch 0, Channel 2
3213 11.0f,
3214
3215 // Batch 1, Channel 0
3216 25.0f,
3217
3218 // Batch 1, Channel 1
3219 27.0f,
3220
3221 // Batch 1, Channel 2
3222 29.0f
3223 }));
3224
3225 armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, armnn::GetDataType<T>());
3226 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3227 // Batch 0, Channel 0
3228 13.0f, 14.0f, 50.0f,
3229
3230 // Batch 0, Channel 1
3231 15.0f, 16.0f, 51.0f,
3232
3233 // Batch 0, Channel 2
3234 17.0f, 18.0f, 52.0f,
3235
3236 // Batch 1, Channel 0
3237 31.0f, 32.0f, 53.0f,
3238
3239 // Batch 1, Channel 1
3240 33.0f, 34.0f, 54.0f,
3241
3242 // Batch 1, Channel 2
3243 35.0f, 36.0f, 55.0f,
3244 }));
3245
3246 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
3247 LayerTestResult<T, 3> result(outputTensorInfo);
3248
3249 std::vector<T> output;
3250 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003251 Concatenate<T>(workloadFactory, memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00003252 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
3253 { input0.data(), input1.data(), input2.data() },
3254 outputTensorInfo,
3255 output.data(),
3256 2);
3257
3258 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
3259 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3260 // Batch 0, Channel 0
3261 1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
3262
3263 // Batch 0, Channel 1
3264 3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
3265
3266 // Batch 0, Channel 2
3267 5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
3268
3269 // Batch 1, Channel 0
3270 19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
3271
3272 // Batch 1, Channel 1
3273 21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
3274
3275 // Batch 1, Channel 2
3276 23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
3277 }));
3278
3279 return result;
3280}
3281
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003282LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
3283 armnn::IWorkloadFactory& workloadFactory,
3284 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003285{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003286 return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003287}
3288
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003289LayerTestResult<float, 4> ResizeBilinearNopTest(
3290 armnn::IWorkloadFactory& workloadFactory,
3291 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3292 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00003293{
James Conroy6b965822018-11-01 11:33:09 +00003294 const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
3295 const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00003296
James Conroy6b965822018-11-01 11:33:09 +00003297 std::vector<float> inputData({
3298 1.0f, 2.0f, 3.0f, 4.0f,
3299 2.0f, 3.0f, 4.0f, 5.0f,
3300 3.0f, 4.0f, 5.0f, 6.0f,
3301 4.0f, 5.0f, 6.0f, 7.0f,
3302
telsoa014fcda012018-03-09 14:13:49 +00003303 1.0f, 2.0f, 3.0f, 4.0f,
3304 2.0f, 3.0f, 4.0f, 5.0f,
3305 3.0f, 4.0f, 5.0f, 6.0f,
3306 4.0f, 5.0f, 6.0f, 7.0f
James Conroy6b965822018-11-01 11:33:09 +00003307 });
3308
3309 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
3310 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
3311 {
3312 std::vector<float> tmp(inputData.size());
3313 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
3314 inputData = tmp;
3315 }
3316
3317 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00003318
3319 LayerTestResult<float, 4> result(outputTensorInfo);
3320 result.outputExpected = input;
3321
3322 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3323 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3324
3325 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003326 descriptor.m_Parameters.m_DataLayout = dataLayout;
3327 armnn::WorkloadInfo info;
3328 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3329 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3330
3331 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3332
3333 inputHandle->Allocate();
3334 outputHandle->Allocate();
3335 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3336
James Conroy074f3712018-10-03 09:32:03 +01003337 workload->Execute();
3338
3339 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3340 return result;
3341}
3342
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003343LayerTestResult<float, 4> SimpleResizeBilinearTest(
3344 armnn::IWorkloadFactory& workloadFactory,
3345 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3346 const armnn::DataLayoutIndexed& dataLayout)
James Conroy074f3712018-10-03 09:32:03 +01003347{
James Conroy6b965822018-11-01 11:33:09 +00003348 const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
3349 const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 1, 1, dataLayout);
James Conroy074f3712018-10-03 09:32:03 +01003350
James Conroy6b965822018-11-01 11:33:09 +00003351 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01003352 1.0f, 255.0f,
James Conroy6b965822018-11-01 11:33:09 +00003353 200.0f, 250.0f,
3354
3355 250.0f, 200.0f,
3356 250.0f, 1.0f
3357 });
James Conroy074f3712018-10-03 09:32:03 +01003358
3359 // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
3360 // then figures out the interpolants and weights. Note this is different to projecting the centre of the
James Conroy6b965822018-11-01 11:33:09 +00003361 // output texel. Thus, for a input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
3362 // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
3363 // which we would expect if projecting the centre).
3364
3365 std::vector<float> outputData({
3366 1.0f,
3367
3368 250.0f
3369 });
3370
3371 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
3372 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
3373 {
3374 std::vector<float> tmp(inputData.size());
3375 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
3376 inputData = tmp;
3377
3378 std::vector<float> tmp1(outputData.size());
3379 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
3380 outputData = tmp1;
3381 }
3382
3383 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
3384
James Conroy074f3712018-10-03 09:32:03 +01003385 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00003386 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
James Conroy074f3712018-10-03 09:32:03 +01003387
3388 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3389 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3390
3391 armnn::ResizeBilinearQueueDescriptor descriptor;
3392 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003393 armnn::WorkloadInfo info;
3394 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3395 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3396
3397 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3398
3399 inputHandle->Allocate();
3400 outputHandle->Allocate();
3401 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3402
3403 workload->Execute();
3404
3405 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3406 return result;
3407}
3408
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003409LayerTestResult<float, 4> ResizeBilinearSqMinTest(
3410 armnn::IWorkloadFactory& workloadFactory,
3411 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3412 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00003413{
James Conroy6b965822018-11-01 11:33:09 +00003414 const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
3415 const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00003416
James Conroy6b965822018-11-01 11:33:09 +00003417 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01003418 1.0f, 2.0f, 3.0f, 4.0f,
3419 2.0f, 3.0f, 4.0f, 5.0f,
3420 3.0f, 4.0f, 5.0f, 6.0f,
James Conroy6b965822018-11-01 11:33:09 +00003421 4.0f, 5.0f, 6.0f, 7.0f,
3422
3423 7.0f, 6.0f, 5.0f, 4.0f,
3424 6.0f, 5.0f, 4.0f, 3.0f,
3425 5.0f, 4.0f, 3.0f, 2.0f,
3426 4.0f, 3.0f, 2.0f, 1.0f
3427 });
3428
3429 std::vector<float> outputData({
3430 1.0f, 3.0f,
3431 3.0f, 5.0f,
3432
3433 7.0f, 5.0f,
3434 5.0f, 3.0f
3435 });
3436
3437 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
3438 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
3439 {
3440 std::vector<float> tmp(inputData.size());
3441 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
3442 inputData = tmp;
3443
3444 std::vector<float> tmp1(outputData.size());
3445 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
3446 outputData = tmp1;
3447 }
3448
3449 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00003450
telsoa014fcda012018-03-09 14:13:49 +00003451 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00003452 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00003453
3454 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3455 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3456
3457 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003458 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003459 armnn::WorkloadInfo info;
3460 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3461 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3462
3463 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3464
3465 inputHandle->Allocate();
3466 outputHandle->Allocate();
3467 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3468
3469 workload->Execute();
3470
3471 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3472 return result;
3473}
3474
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003475LayerTestResult<float, 4> ResizeBilinearMinTest(
3476 armnn::IWorkloadFactory& workloadFactory,
3477 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3478 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00003479{
James Conroy6b965822018-11-01 11:33:09 +00003480 const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
3481 const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 2, 3, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00003482
James Conroy6b965822018-11-01 11:33:09 +00003483 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01003484 1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
3485 13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
James Conroy6b965822018-11-01 11:33:09 +00003486 144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
3487
3488 987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
3489 89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
3490 8.0f, 5.0f, 3.0f, 2.0f, 1.0f
3491 });
3492
3493 std::vector<float> outputData({
3494 1.0f, 2.6666f, 6.00f,
3495 78.5f, 179.3333f, 401.00f,
3496
3497 987.0f, 454.6670f, 203.33f,
3498 48.5f, 22.3333f, 10.00f
3499 });
3500
3501 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
3502 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
3503 {
3504 std::vector<float> tmp(inputData.size());
3505 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
3506 inputData = tmp;
3507
3508 std::vector<float> tmp1(outputData.size());
3509 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
3510 outputData = tmp1;
3511 }
3512
3513 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00003514
3515 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00003516 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00003517
3518 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3519 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3520
3521 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003522 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003523 armnn::WorkloadInfo info;
3524 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3525 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3526
3527 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3528
3529 inputHandle->Allocate();
3530 outputHandle->Allocate();
3531 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3532
3533 workload->Execute();
3534
3535 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3536 return result;
3537}
3538
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003539LayerTestResult<float, 4> ResizeBilinearMagTest(
3540 armnn::IWorkloadFactory& workloadFactory,
3541 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3542 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00003543{
James Conroy6b965822018-11-01 11:33:09 +00003544 const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 3, 2, dataLayout);
3545 const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00003546
James Conroy6b965822018-11-01 11:33:09 +00003547 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01003548 1.0f, 2.0f,
3549 13.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00003550 144.0f, 233.0f,
telsoa014fcda012018-03-09 14:13:49 +00003551
James Conroy6b965822018-11-01 11:33:09 +00003552 233.0f, 144.0f,
3553 21.0f, 13.0f,
3554 2.0f, 1.0f
3555 });
3556
3557 std::vector<float> outputData({
James Conroy074f3712018-10-03 09:32:03 +01003558 1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
3559 13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00003560 144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
3561
3562 233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
3563 21.0f, 17.8f, 14.6f, 13.0f, 13.0f,
3564 2.0f, 1.6f, 1.2f, 1.0f, 1.0f
3565 });
3566
3567 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
3568 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
3569 {
3570 std::vector<float> tmp(inputData.size());
3571 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
3572 inputData = tmp;
3573
3574 std::vector<float> tmp1(outputData.size());
3575 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
3576 outputData = tmp1;
3577 }
3578
3579 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
3580
3581 LayerTestResult<float, 4> result(outputTensorInfo);
3582 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00003583
3584 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3585 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3586
3587 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003588 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003589 armnn::WorkloadInfo info;
3590 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3591 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3592
3593 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3594
3595 inputHandle->Allocate();
3596 outputHandle->Allocate();
3597 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3598
3599 workload->Execute();
3600
3601 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3602 return result;
3603}
3604
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003605LayerTestResult<float, 2> FakeQuantizationTest(
3606 armnn::IWorkloadFactory& workloadFactory,
3607 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003608{
3609 constexpr unsigned int width = 2;
3610 constexpr unsigned int height = 3;
3611
3612 const armnn::TensorInfo tensorInfo({height, width },
3613 armnn::DataType::Float32);
3614 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
3615 -10.0f, -5.0f,
3616 0.0f, 5.0f,
3617 10.0f, 10.0f
3618 }));
3619
3620 LayerTestResult<float, 2> ret(tensorInfo);
3621
3622 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
3623
3624 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
3625
3626 armnn::FakeQuantizationQueueDescriptor data;
3627 armnn::WorkloadInfo info;
3628
3629 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
3630 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
3631 float min = -10.f;
3632 float max = 10.f;
3633
3634 data.m_Parameters.m_Min = min;
3635 data.m_Parameters.m_Max = max;
3636
3637 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
3638 armnn::FakeQuantizationQueueDescriptor refData = data;
3639 armnn::WorkloadInfo refInfo = info;
3640 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
3641
3642 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
3643
3644 inputHandle->Allocate();
3645 outputHandle->Allocate();
3646
3647 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
3648
3649 workload->Execute();
3650
3651 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
3652
3653 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
3654 0.0f, 63.0f,
3655 128.0f, 191.0f,
3656 255.0f, 255.0f
3657 }));
3658 return ret;
3659}
3660
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003661namespace
3662{
3663
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003664LayerTestResult<float, 4> L2NormalizationTestImpl(
3665 armnn::IWorkloadFactory& workloadFactory,
3666 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3667 const armnn::TensorShape& inputOutputTensorShape,
3668 const std::vector<float>& inputValues,
3669 const std::vector<float>& expectedOutputValues,
3670 const armnn::DataLayoutIndexed& layout)
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003671{
3672 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
3673 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
3674
jimfly013aab7c32018-11-12 13:32:08 +00003675 // at this point if we require it permute the input data
3676 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
3677 std::vector<float> inputData = inputValues;
3678 if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
3679 {
3680 std::vector<float> tmp(inputData.size());
3681 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
3682 inputData = tmp;
3683 }
3684
3685 auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003686
3687 LayerTestResult<float, 4> result(outputTensorInfo);
jimfly013aab7c32018-11-12 13:32:08 +00003688 std::vector<float> expectedOutputData = expectedOutputValues;
3689 if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
3690 {
3691 std::vector<float> tmp(expectedOutputData.size());
3692 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data());
3693 expectedOutputData = tmp;
3694 }
3695 result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(expectedOutputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003696
3697 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3698 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3699
3700 armnn::L2NormalizationQueueDescriptor descriptor;
jimfly013aab7c32018-11-12 13:32:08 +00003701 descriptor.m_Parameters.m_DataLayout = layout.GetDataLayout();
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003702 armnn::WorkloadInfo info;
3703
3704 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3705 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3706
3707 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
3708
3709 inputHandle->Allocate();
3710 outputHandle->Allocate();
3711
3712 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
3713
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003714 ExecuteWorkload(*workload, memoryManager);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003715
3716 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3717
3718 return result;
3719}
3720
3721float CalcInvL2Norm(std::initializer_list<float> elements)
3722{
3723 const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
3724 [](float acc, float element) { return acc + element * element; });
3725 return 1.0f / sqrtf(reduction);
3726}
3727
3728} // anonymous namespace
3729
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003730template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003731LayerTestResult<T, 2> Pad2dTestCommon(
3732 armnn::IWorkloadFactory& workloadFactory,
3733 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3734 float qScale,
3735 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003736{
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003737 const armnn::TensorShape inputShape{ 3, 3 };
3738 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003739
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003740 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
3741 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003742
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003743 std::vector<T> inputValues(
3744 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003745 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003746 // Height (3) x Width (3)
3747 4, 8, 6,
3748 7, 4, 4,
3749 3, 2, 4
3750 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003751
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003752 std::vector<T> expectedOutputValues(
3753 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003754 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003755 0, 0, 0, 0, 0, 0, 0,
3756 0, 0, 0, 0, 0, 0, 0,
3757 0, 0, 4, 8, 6, 0, 0,
3758 0, 0, 7, 4, 4, 0, 0,
3759 0, 0, 3, 2, 4, 0, 0,
3760 0, 0, 0, 0, 0, 0, 0,
3761 0, 0, 0, 0, 0, 0, 0
3762 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003763
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003764 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003765
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003766 LayerTestResult<T, 2> result(outputTensorInfo);
3767 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003768
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003769 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3770 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003771
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003772 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003773
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003774 std::vector<std::pair<unsigned int, unsigned int>> PadList;
3775 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
3776 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003777
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003778 descriptor.m_Parameters.m_PadList = PadList;
3779 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003780
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003781 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3782 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003783
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003784 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003785
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003786 inputHandle->Allocate();
3787 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003788
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003789 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003790
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003791 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003792
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003793 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003794
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003795 return result;
3796}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003797
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003798template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003799LayerTestResult<T, 3> Pad3dTestCommon(
3800 armnn::IWorkloadFactory& workloadFactory,
3801 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3802 float qScale,
3803 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003804{
3805 const armnn::TensorShape inputShape{ 2, 2, 2 };
3806 const armnn::TensorShape outputShape{ 3, 5, 6 };
3807
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003808 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
3809 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003810
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003811 std::vector<T> inputValues(
3812 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003813 {
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003814 // Channel 0, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003815 0, 4,
3816 2, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003817
3818 // Channel 1, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003819 6, 1,
3820 5, 2
3821 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003822
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003823 std::vector<T> expectedOutputValues(
3824 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003825 {
3826
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003827 0, 0, 0, 0, 0, 0,
3828 0, 0, 0, 0, 0, 0,
3829 0, 0, 0, 4, 0, 0,
3830 0, 0, 2, 5, 0, 0,
3831 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003832
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003833 0, 0, 0, 0, 0, 0,
3834 0, 0, 0, 0, 0, 0,
3835 0, 0, 6, 1, 0, 0,
3836 0, 0, 5, 2, 0, 0,
3837 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003838
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003839 0, 0, 0, 0, 0, 0,
3840 0, 0, 0, 0, 0, 0,
3841 0, 0, 0, 0, 0, 0,
3842 0, 0, 0, 0, 0, 0,
3843 0, 0, 0, 0, 0, 0
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003844
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003845 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003846
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003847 auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003848
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003849 LayerTestResult<T, 3> result(outputTensorInfo);
3850 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003851
3852 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3853 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3854
3855 armnn::PadQueueDescriptor descriptor;
3856
3857 std::vector<std::pair<unsigned int, unsigned int>> PadList;
3858 PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
3859 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
3860 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
3861
3862 descriptor.m_Parameters.m_PadList = PadList;
3863 armnn::WorkloadInfo info;
3864
3865 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3866 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3867
3868 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
3869
3870 inputHandle->Allocate();
3871 outputHandle->Allocate();
3872
3873 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
3874
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003875 workload->Execute();
3876
3877 CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
3878
3879 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003880}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003881
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003882template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003883LayerTestResult<T, 4> Pad4dTestCommon(
3884 armnn::IWorkloadFactory& workloadFactory,
3885 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3886 float qScale,
3887 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003888{
3889 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
3890 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
3891
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003892 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
3893 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003894
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003895 std::vector<T> inputValues(
3896 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003897 {
3898 // Batch 0, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003899 0, 1,
3900 2, 3,
3901 4, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003902
3903 // Batch 0, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003904 6, 7,
3905 8, 9,
3906 10, 11,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003907
3908 // Batch 1, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003909 12, 13,
3910 14, 15,
3911 16, 17,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003912
3913 // Batch 1, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003914 18, 19,
3915 20, 21,
3916 22, 23
3917 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003918
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003919 std::vector<T> expectedOutputValues(
3920 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003921 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003922 0, 0, 0, 0,
3923 0, 0, 0, 0,
3924 0, 0, 0, 0,
3925 0, 0, 0, 0,
3926 0, 0, 0, 0,
3927 0, 0, 0, 0,
3928 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003929
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003930 0, 0, 0, 0,
3931 0, 0, 0, 0,
3932 0, 0, 0, 0,
3933 0, 0, 0, 0,
3934 0, 0, 0, 0,
3935 0, 0, 0, 0,
3936 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003937
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003938 0, 0, 0, 0,
3939 0, 0, 0, 0,
3940 0, 0, 0, 0,
3941 0, 0, 0, 0,
3942 0, 0, 0, 0,
3943 0, 0, 0, 0,
3944 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003945
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003946 0, 0, 0, 0,
3947 0, 0, 0, 0,
3948 0, 0, 0, 0,
3949 0, 0, 0, 0,
3950 0, 0, 0, 0,
3951 0, 0, 0, 0,
3952 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003953
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003954 0, 0, 0, 0,
3955 0, 0, 0, 0,
3956 0, 0, 0, 0,
3957 0, 0, 0, 0,
3958 0, 0, 0, 0,
3959 0, 0, 0, 0,
3960 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003961
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003962 0, 0, 0, 0,
3963 0, 0, 0, 0,
3964 0, 0, 0, 0,
3965 0, 0, 0, 0,
3966 0, 0, 0, 0,
3967 0, 0, 0, 0,
3968 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003969
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003970 0, 0, 0, 0,
3971 0, 0, 0, 0,
3972 0, 0, 0, 0,
3973 0, 0, 0, 0,
3974 0, 0, 0, 0,
3975 0, 0, 0, 0,
3976 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003977
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003978 0, 0, 0, 0,
3979 0, 0, 0, 0,
3980 0, 0, 0, 0,
3981 0, 0, 1, 0,
3982 0, 2, 3, 0,
3983 0, 4, 5, 0,
3984 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003985
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003986 0, 0, 0, 0,
3987 0, 0, 0, 0,
3988 0, 0, 0, 0,
3989 0, 6, 7, 0,
3990 0, 8, 9, 0,
3991 0, 10, 11, 0,
3992 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003993
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003994 0, 0, 0, 0,
3995 0, 0, 0, 0,
3996 0, 0, 0, 0,
3997 0, 0, 0, 0,
3998 0, 0, 0, 0,
3999 0, 0, 0, 0,
4000 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004001
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004002 0, 0, 0, 0,
4003 0, 0, 0, 0,
4004 0, 0, 0, 0,
4005 0, 0, 0, 0,
4006 0, 0, 0, 0,
4007 0, 0, 0, 0,
4008 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004009
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004010 0, 0, 0, 0,
4011 0, 0, 0, 0,
4012 0, 0, 0, 0,
4013 0, 0, 0, 0,
4014 0, 0, 0, 0,
4015 0, 0, 0, 0,
4016 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004017
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004018 0, 0, 0, 0,
4019 0, 0, 0, 0,
4020 0, 0, 0, 0,
4021 0, 12, 13, 0,
4022 0, 14, 15, 0,
4023 0, 16, 17, 0,
4024 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004025
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004026 0, 0, 0, 0,
4027 0, 0, 0, 0,
4028 0, 0, 0, 0,
4029 0, 18, 19, 0,
4030 0, 20, 21, 0,
4031 0, 22, 23, 0,
4032 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004033
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004034 0, 0, 0, 0,
4035 0, 0, 0, 0,
4036 0, 0, 0, 0,
4037 0, 0, 0, 0,
4038 0, 0, 0, 0,
4039 0, 0, 0, 0,
4040 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004041
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004042 0, 0, 0, 0,
4043 0, 0, 0, 0,
4044 0, 0, 0, 0,
4045 0, 0, 0, 0,
4046 0, 0, 0, 0,
4047 0, 0, 0, 0,
4048 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004049
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004050 0, 0, 0, 0,
4051 0, 0, 0, 0,
4052 0, 0, 0, 0,
4053 0, 0, 0, 0,
4054 0, 0, 0, 0,
4055 0, 0, 0, 0,
4056 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004057
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004058 0, 0, 0, 0,
4059 0, 0, 0, 0,
4060 0, 0, 0, 0,
4061 0, 0, 0, 0,
4062 0, 0, 0, 0,
4063 0, 0, 0, 0,
4064 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004065
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004066 0, 0, 0, 0,
4067 0, 0, 0, 0,
4068 0, 0, 0, 0,
4069 0, 0, 0, 0,
4070 0, 0, 0, 0,
4071 0, 0, 0, 0,
4072 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004073
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004074 0, 0, 0, 0,
4075 0, 0, 0, 0,
4076 0, 0, 0, 0,
4077 0, 0, 0, 0,
4078 0, 0, 0, 0,
4079 0, 0, 0, 0,
4080 0, 0, 0, 0
4081 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004082
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004083 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004084
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004085 LayerTestResult<T, 4> result(outputTensorInfo);
4086 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004087
4088 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4089 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4090
4091 armnn::PadQueueDescriptor descriptor;
4092
4093 std::vector<std::pair<unsigned int, unsigned int>> PadList;
4094 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
4095 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
4096 PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
4097 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
4098
4099 descriptor.m_Parameters.m_PadList = PadList;
4100 armnn::WorkloadInfo info;
4101
4102 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4103 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4104
4105 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
4106
4107 inputHandle->Allocate();
4108 outputHandle->Allocate();
4109
4110 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
4111
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004112 workload->Execute();
4113
4114 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4115
4116 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004117}
4118
// Runs the 2D pad test with uint8 (quantized) data; scale 1.0 / offset 0 leaves values unscaled.
LayerTestResult<uint8_t, 2> PadUint82dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Pad2dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
}
4125
// Runs the 3D pad test with uint8 (quantized) data; scale 1.0 / offset 0 leaves values unscaled.
LayerTestResult<uint8_t, 3> PadUint83dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Pad3dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
}
4132
// Runs the 4D pad test with uint8 (quantized) data; scale 1.0 / offset 0 leaves values unscaled.
LayerTestResult<uint8_t, 4> PadUint84dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Pad4dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
}
4139
// Runs the 2D pad test with float32 data; quantization parameters (0.0, 0) are unused for float.
LayerTestResult<float, 2> PadFloat322dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Pad2dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
}
4146
// Runs the 3D pad test with float32 data; quantization parameters (0.0, 0) are unused for float.
LayerTestResult<float, 3> PadFloat323dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Pad3dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
}
4153
// Runs the 4D pad test with float32 data; quantization parameters (0.0, 0) are unused for float.
LayerTestResult<float, 4> PadFloat324dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Pad4dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004160
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004161LayerTestResult<float, 4> L2Normalization1dTest(
4162 armnn::IWorkloadFactory& workloadFactory,
4163 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4164 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +00004165{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004166 // Width: 1
4167 // Height: 1
4168 // Channels: 10
4169 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00004170 unsigned int numberOfBatches = 1;
4171 unsigned int numberOfChannels = 10;
4172 unsigned int height = 1;
4173 unsigned int width = 1;
telsoa014fcda012018-03-09 14:13:49 +00004174
jimfly013aab7c32018-11-12 13:32:08 +00004175
4176 const armnn::TensorShape inputOutputShape = GetTestTensorShape(
4177 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004178 std::vector<float> inputValues
4179 {
4180 // Batch 0, Channel 0, Height (1) x Width (1)
4181 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00004182
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004183 // Batch 0, Channel 1, Height (1) x Width (1)
4184 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00004185
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004186 // Batch 0, Channel 2, Height (1) x Width (1)
4187 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00004188
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004189 // Batch 0, Channel 3, Height (1) x Width (1)
4190 4.0f,
4191
4192 // Batch 0, Channel 4, Height (1) x Width (1)
4193 5.0f,
4194
4195 // Batch 0, Channel 5, Height (1) x Width (1)
4196 6.0f,
4197
4198 // Batch 0, Channel 6, Height (1) x Width (1)
4199 7.0f,
4200
4201 // Batch 0, Channel 7, Height (1) x Width (1)
4202 8.0f,
4203
4204 // Batch 0, Channel 8, Height (1) x Width (1)
4205 9.0f,
4206
4207 // Batch 0, Channel 9, Height (1) x Width (1)
4208 10.0f
4209 };
telsoa014fcda012018-03-09 14:13:49 +00004210 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004211 std::vector<float> expectedOutputValues
4212 {
4213 // Batch 0, Channel 0, Height (1) x Width (1)
telsoa014fcda012018-03-09 14:13:49 +00004214 1.0f * approxInvL2Norm,
4215 2.0f * approxInvL2Norm,
4216 3.0f * approxInvL2Norm,
4217 4.0f * approxInvL2Norm,
4218 5.0f * approxInvL2Norm,
4219 6.0f * approxInvL2Norm,
4220 7.0f * approxInvL2Norm,
4221 8.0f * approxInvL2Norm,
4222 9.0f * approxInvL2Norm,
4223 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004224 };
telsoa014fcda012018-03-09 14:13:49 +00004225
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004226
4227 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00004228 inputValues, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00004229}
4230
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004231LayerTestResult<float, 4> L2Normalization2dTest(
4232 armnn::IWorkloadFactory& workloadFactory,
4233 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4234 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +00004235{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004236 // Width: 5
4237 // Height: 1
4238 // Channels: 2
4239 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00004240 unsigned int numberOfBatches = 1;
4241 unsigned int numberOfChannels = 2;
4242 unsigned int height = 1;
4243 unsigned int width = 5;
telsoa014fcda012018-03-09 14:13:49 +00004244
jimfly013aab7c32018-11-12 13:32:08 +00004245 const armnn::TensorShape inputOutputShape = GetTestTensorShape(
4246 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004247 std::vector<float> inputValues
4248 {
4249 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00004250 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00004251
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004252 // Batch 0, Channel 1, Height (1) x Width (5)
4253 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
4254 };
4255 std::vector<float> expectedOutputValues
4256 {
4257 // Batch 0, Channel 0, Height (1) x Width (5)
4258 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
4259 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
4260 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
4261 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00004262 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
4263
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004264 // Batch 0, Channel 1, Height (1) x Width (5)
4265 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
4266 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
4267 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
4268 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00004269 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004270 };
telsoa014fcda012018-03-09 14:13:49 +00004271
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004272 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00004273 inputValues, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004274}
telsoa014fcda012018-03-09 14:13:49 +00004275
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004276LayerTestResult<float, 4> L2Normalization3dTest(
4277 armnn::IWorkloadFactory& workloadFactory,
4278 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4279 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +00004280{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004281 // Width: 3
4282 // Height: 4
4283 // Channels: 2
4284 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00004285 unsigned int numberOfBatches = 1;
4286 unsigned int numberOfChannels = 2;
4287 unsigned int height = 4;
4288 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00004289
jimfly013aab7c32018-11-12 13:32:08 +00004290 const armnn::TensorShape inputOutputShape = GetTestTensorShape(
4291 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004292 std::vector<float> inputValues
4293 {
4294 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004295 119.0f, 21.0f, 150.0f,
4296 149.0f, 32.0f, 179.0f,
4297 15.0f, 227.0f, 141.0f,
4298 147.0f, 199.0f, 220.0f,
4299
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004300 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004301 110.0f, 140.0f, 73.0f,
4302 211.0f, 212.0f, 89.0f,
4303 24.0f, 138.0f, 188.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004304 162.0f, 12.0f, 161.0f
4305 };
4306 std::vector<float> expectedOutputValues
4307 {
4308 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004309 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4310 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4311 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4312 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4313 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4314 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4315 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4316 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4317 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4318 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4319 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
4320 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
4321
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004322 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004323 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4324 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4325 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4326 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4327 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4328 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4329 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4330 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4331 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4332 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4333 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004334 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
4335 };
telsoa014fcda012018-03-09 14:13:49 +00004336
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004337 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00004338 inputValues, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004339}
telsoa014fcda012018-03-09 14:13:49 +00004340
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004341LayerTestResult<float, 4> L2Normalization4dTest(
4342 armnn::IWorkloadFactory& workloadFactory,
4343 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4344 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +00004345{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004346 // Width: 3
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004347 // Height: 4
4348 // Channels: 3
4349 // BatchSize: 2
jimfly013aab7c32018-11-12 13:32:08 +00004350 unsigned int numberOfBatches = 2;
4351 unsigned int numberOfChannels = 3;
4352 unsigned int height = 4;
4353 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00004354
jimfly013aab7c32018-11-12 13:32:08 +00004355 const armnn::TensorShape inputOutputShape = GetTestTensorShape(
4356 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004357 std::vector<float> inputValues
4358 {
4359 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004360 235.0f, 46.0f, 178.0f,
4361 100.0f, 123.0f, 19.0f,
4362 172.0f, 74.0f, 250.0f,
4363 6.0f, 195.0f, 80.0f,
4364
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004365 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004366 113.0f, 95.0f, 202.0f,
4367 77.0f, 114.0f, 71.0f,
4368 122.0f, 246.0f, 166.0f,
4369 82.0f, 28.0f, 37.0f,
4370
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004371 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004372 56.0f, 170.0f, 162.0f,
4373 194.0f, 89.0f, 254.0f,
4374 12.0f, 209.0f, 200.0f,
4375 1.0f, 64.0f, 54.0f,
4376
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004377 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004378 67.0f, 90.0f, 49.0f,
4379 7.0f, 163.0f, 18.0f,
4380 25.0f, 117.0f, 103.0f,
4381 247.0f, 59.0f, 189.0f,
4382
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004383 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004384 239.0f, 104.0f, 199.0f,
4385 17.0f, 124.0f, 153.0f,
4386 222.0f, 217.0f, 75.0f,
4387 32.0f, 126.0f, 21.0f,
4388
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004389 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004390 97.0f, 145.0f, 215.0f,
4391 115.0f, 116.0f, 238.0f,
4392 226.0f, 16.0f, 132.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004393 92.0f, 125.0f, 88.0f
4394 };
4395 std::vector<float> expectedOutputValues
4396 {
4397 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004398 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4399 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4400 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
4401 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4402 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4403 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4404 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4405 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4406 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4407 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4408 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4409 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4410
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004411 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004412 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4413 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4414 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
4415 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4416 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4417 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4418 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4419 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4420 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4421 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4422 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4423 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4424
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004425 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004426 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4427 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4428 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
4429 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4430 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4431 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4432 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4433 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4434 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4435 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4436 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4437 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4438
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004439 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004440 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4441 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4442 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4443 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4444 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4445 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4446 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4447 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4448 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4449 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4450 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4451 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
4452
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004453 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004454 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4455 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4456 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4457 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4458 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4459 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4460 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4461 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4462 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4463 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4464 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4465 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
4466
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004467 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004468 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4469 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4470 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4471 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4472 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4473 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4474 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4475 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4476 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4477 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4478 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004479 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
4480 };
telsoa014fcda012018-03-09 14:13:49 +00004481
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004482 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00004483 inputValues, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00004484}
4485
// Checks the Constant workload: a ConstantQueueDescriptor carrying a fixed
// tensor must reproduce exactly that tensor on its output handle.
// qScale/qOffset are applied only when T is a quantized type.
// NOTE(review): memoryManager is not referenced in this body - it appears to be
// kept for signature consistency with the other test helpers in this file.
template <typename T>
LayerTestResult<T, 4> ConstantTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;

    // The Constant layer performs no reshaping, so the output dimensions mirror the input.
    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::GetDataType<T>());

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    // Arbitrary NCHW test data, quantized as needed.
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f,
    })));

    // The constant's payload is also the expected output.
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // The constant data is owned by a ScopedCpuTensorHandle that the descriptor points at.
    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;

    // Constant has no inputs - only an output is attached to the workload.
    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);

    outputHandle->Allocate();

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
4580
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004581LayerTestResult<float, 4> ConstantTest(
4582 armnn::IWorkloadFactory& workloadFactory,
4583 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004584{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004585 return ConstantTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004586}
4587
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004588LayerTestResult<uint8_t, 4> ConstantTestUint8(
4589 armnn::IWorkloadFactory& workloadFactory,
4590 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004591{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004592 return ConstantTestImpl<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004593}
4594
// Tests the Merger workload on QAsymm8 tensors: concatenates a 2-channel and a
// 1-channel 6x3 input along the channel dimension into a 3-channel output.
// When the backend supports sub-tensors the inputs are created as views into
// the output tensor at the given window origins, so the merge happens in place.
LayerTestResult<uint8_t, 3> MergerUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the merger operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // Expected output: input1's two channels followed by input2's channel.
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    // First input: occupies channels 0-1 of the output.
    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    // Second input: occupies channel 2 of the output.
    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // When supported, create the input handles as sub-tensor views into the
    // output at the corresponding window origins; otherwise use standalone
    // tensors and let the workload copy the data.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
4729
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004730LayerTestResult<uint8_t, 4> AdditionUint8Test(
4731 armnn::IWorkloadFactory& workloadFactory,
4732 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004733{
4734 unsigned int batchSize = 1;
4735 unsigned int channels = 2;
4736 unsigned int height = 2;
4737 unsigned int width = 3;
4738
4739 const float scale = 7.0f;
4740 const int32_t offset = 3;
4741
4742 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
4743 armnn::TensorInfo outputTensorInfo;
4744
4745 const unsigned int shape[] = { batchSize, channels, height, width };
4746 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
4747 inputTensorInfo1.SetQuantizationScale(scale);
4748 inputTensorInfo1.SetQuantizationOffset(offset);
4749
4750 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
4751 inputTensorInfo2.SetQuantizationScale(scale);
4752 inputTensorInfo2.SetQuantizationOffset(offset);
4753
4754 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
4755 outputTensorInfo.SetQuantizationScale(scale);
4756 outputTensorInfo.SetQuantizationOffset(offset);
4757
telsoa01c577f2c2018-08-31 09:22:23 +01004758 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00004759 auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
4760 {
4761 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
4762 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
4763 }));
4764
telsoa01c577f2c2018-08-31 09:22:23 +01004765 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00004766 auto input2 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
4767 {
4768 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
4769 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
4770 }));
4771
telsoa01c577f2c2018-08-31 09:22:23 +01004772 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00004773 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
4774 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>(
4775 {
4776 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
4777 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
4778 }));
4779
4780 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
4781 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
4782 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4783
4784 armnn::AdditionQueueDescriptor data;
4785 armnn::WorkloadInfo info;
4786 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
4787 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
4788 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
4789
4790 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
4791
4792 inputHandle1->Allocate();
4793 inputHandle2->Allocate();
4794 outputHandle->Allocate();
4795
4796 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
4797 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
4798
4799 workload->Execute();
4800
4801 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4802
4803 return result;
4804}
4805
namespace
{
// Shared driver for QAsymm8 Multiplication workload tests. Builds two input
// tensors and an expected output tensor from raw quantized values plus
// per-tensor quantization parameters, runs the Multiplication workload, and
// returns actual vs expected results for comparison by the caller.
// shape0 and shape1 may differ in order to exercise broadcasting.
LayerTestResult<uint8_t, 4> MultiplicationUint8TestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<uint8_t> & values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<uint8_t> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<uint8_t> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1(4, shape1, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo outputTensorInfo(4, outShape, armnn::DataType::QuantisedAsymm8);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<uint8_t, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, values1);

    // Expected output is supplied by the caller, already quantized.
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
4869
// Tests QAsymm8 multiplication of two 1x2x2x3 tensors with distinct input
// quantization parameters; the output scale/offset are chosen so that some
// products fall outside the representable range and must be clamped.
LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int batchSize = 1;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;
    const unsigned int shape[] = { batchSize, channels, height, width };

    // See dequantized values to the right.
    std::vector<uint8_t> input0({
         62,  37,   3, 172,  13, 111, // 244, 144,   8, 684,  48, 440,
        188,  20,  73,  31,  23,  31  // 748,  76, 288, 120,  88, 120
    });

    // See dequantized values to the right.
    std::vector<uint8_t> input1({
        126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
         48, 115, 151,  79,  78,  97  // 150, 351, 459, 243, 240, 297
    });

    // See dequantized values to the right.
    std::vector<uint8_t> output(
    {
         64,  72,   0, 255,   8, 236, //  93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
         77,  15,  92,  16,  10,  21, // 112200,  26676, 132192, 29160, 21120, 35640
    });

    return MultiplicationUint8TestHelper(workloadFactory,
                                         memoryManager,
                                         shape,
                                         input0,
                                         4.0f,
                                         1,
                                         shape,
                                         input1,
                                         3.0f,
                                         -2,
                                         shape,
                                         output,
                                         1366.255f, // Scale/offset chosen to have output values out of range.
                                         -5);
}
4914
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004915LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
4916 armnn::IWorkloadFactory& workloadFactory,
4917 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01004918{
4919 const unsigned int shape0[] = { 1, 2, 2, 3 };
4920 const unsigned int shape1[] = { 1, 1, 1, 1 };
4921
4922 std::vector<uint8_t> input0({
4923 1, 2, 3, 4, 5, 6,
4924 7, 8, 9, 10, 11, 12
4925 });
4926
4927 std::vector<uint8_t> input1({2});
4928
4929 std::vector<uint8_t> output({
4930 2, 4, 6, 8, 10, 12,
4931 14, 16, 18, 20, 22, 24
4932 });
4933
4934 return MultiplicationUint8TestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004935 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01004936 shape0,
4937 input0,
4938 1.0f,
4939 0,
4940 shape1,
4941 input1,
4942 1.0f,
4943 0,
4944 shape0,
4945 output,
4946 1.0f,
4947 0);
4948}
4949
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004950LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
4951 armnn::IWorkloadFactory& workloadFactory,
4952 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01004953{
4954 const unsigned int shape0[] = { 1, 2, 2, 3 };
4955 const unsigned int shape1[] = { 1, 1, 1, 3 };
4956
4957 std::vector<uint8_t> input0({
4958 1, 2, 3, 4, 5, 6,
4959 7, 8, 9, 10, 11, 12
4960 });
4961
4962 std::vector<uint8_t> input1({1, 2, 3});
4963
4964 std::vector<uint8_t> output({
4965 1, 4, 9, 4, 10, 18,
4966 7, 16, 27, 10, 22, 36
4967 });
4968
4969 return MultiplicationUint8TestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004970 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01004971 shape0,
4972 input0,
4973 1.0f,
4974 0,
4975 shape1,
4976 input1,
4977 1.0f,
4978 0,
4979 shape0,
4980 output,
4981 1.0f,
4982 0);
4983}
telsoa014fcda012018-03-09 14:13:49 +00004984
namespace
{
// Shared driver for Subtraction workload tests. Works for both Float32 and
// QAsymm8: the data type is selected from T. Builds the two inputs and the
// expected output from raw values plus per-tensor quantization parameters,
// runs the Subtraction workload, and returns actual vs expected results.
// shape0 and shape1 may differ in order to exercise broadcasting.
template <typename T>
LayerTestResult<T, 4> SubtractionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    // Infer the ArmNN data type from T: uint8_t -> QAsymm8, otherwise Float32.
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    // Quantization parameters are ignored by float workloads.
    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    // Expected output is supplied by the caller.
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
5053
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005054LayerTestResult<uint8_t, 4> SubtractionUint8Test(
5055 armnn::IWorkloadFactory& workloadFactory,
5056 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01005057{
5058 const unsigned int shape0[] = { 1, 1, 2, 2 };
5059 const unsigned int shape1[] = { 1, 1, 2, 2 };
5060
5061 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
5062 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
5063 std::vector<uint8_t> output({ 3, 3, 5, 5 });
5064
5065 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005066 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01005067 shape0, input0, 0.5f, 2,
5068 shape1, input1, 1.0f, 0,
5069 shape0, output, 1.0f, 0);
5070}
5071
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005072LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
5073 armnn::IWorkloadFactory& workloadFactory,
5074 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01005075{
5076 const unsigned int shape0[] = { 1, 1, 2, 2 };
5077 const unsigned int shape1[] = { 1, 1, 1, 1 };
5078
5079 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
5080 std::vector<uint8_t> input1({ 2 });
5081 std::vector<uint8_t> output({ 5, 6, 7, 8 });
5082
5083 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005084 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01005085 shape0, input0, 0.5f, 2,
5086 shape1, input1, 1.0f, 0,
5087 shape0, output, 1.0f, 3);
5088}
5089
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005090LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
5091 armnn::IWorkloadFactory& workloadFactory,
5092 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01005093{
5094 const unsigned int shape0[] = { 1, 1, 2, 2 };
5095 const unsigned int shape1[] = { 1, 1, 2, 1 };
5096
5097 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
5098 std::vector<uint8_t> input1({ 2, 1 });
5099 std::vector<uint8_t> output({ 8, 11, 12, 15 });
5100
5101 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005102 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01005103 shape0, input0, 1.0f, 0,
5104 shape1, input1, 1.0f, 0,
5105 shape0, output, 1.0f, 0);
5106}
5107
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005108LayerTestResult<float, 4> SubtractionTest(
5109 armnn::IWorkloadFactory& workloadFactory,
5110 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01005111{
5112 const unsigned int shape0[] = { 1, 1, 2, 2 };
5113 const unsigned int shape1[] = { 1, 1, 2, 2 };
5114
5115 std::vector<float> input0({ 1, 2, 3, 4 });
5116 std::vector<float> input1({ 1, -1, 0, 2 });
5117 std::vector<float> output({ 0, 3, 3, 2 });
5118
5119 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005120 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01005121 shape0, input0, 1.0f, 0,
5122 shape1, input1, 1.0f, 0,
5123 shape0, output, 1.0f, 0);
5124}
5125
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005126LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
5127 armnn::IWorkloadFactory& workloadFactory,
5128 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01005129{
5130 const unsigned int shape0[] = { 1, 1, 2, 2 };
5131 const unsigned int shape1[] = { 1, 1, 1, 1 };
5132
5133 std::vector<float> input0({ 1, 2, 3, 4 });
5134 std::vector<float> input1({ 10 });
5135 std::vector<float> output({ -9, -8, -7, -6 });
5136
5137 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005138 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01005139 shape0, input0, 1.0f, 0,
5140 shape1, input1, 1.0f, 0,
5141 shape0, output, 1.0f, 0);
5142}
5143
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005144LayerTestResult<float, 4> SubtractionBroadcastTest(
5145 armnn::IWorkloadFactory& workloadFactory,
5146 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01005147{
5148 const unsigned int shape0[] = { 1, 1, 2, 2 };
5149 const unsigned int shape1[] = { 1, 1, 1, 2 };
5150
5151 std::vector<float> input0({ 1, 2, 3, 4 });
5152 std::vector<float> input1({ 10, -5 });
5153 std::vector<float> output({ -9, 7, -7, 9 });
5154
5155 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005156 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01005157 shape0, input0, 1.0f, 0,
5158 shape1, input1, 1.0f, 0,
5159 shape0, output, 1.0f, 0);
5160}
5161
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005162LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(
5163 armnn::IWorkloadFactory& workloadFactory,
5164 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005165{
5166 constexpr unsigned int inputWidth = 4;
5167 constexpr unsigned int inputHeight = 4;
5168 constexpr unsigned int inputChannels = 1;
5169 constexpr unsigned int inputBatchSize = 1;
5170
5171 constexpr unsigned int outputWidth = inputWidth;
5172 constexpr unsigned int outputHeight = inputHeight;
5173 constexpr unsigned int outputChannels = inputChannels;
5174 constexpr unsigned int outputBatchSize = inputBatchSize;
5175
5176 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
5177 armnn::DataType::QuantisedAsymm8);
5178 inputTensorInfo.SetQuantizationScale(1.5f);
5179 inputTensorInfo.SetQuantizationOffset(-3);
5180
5181 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
5182 armnn::DataType::QuantisedAsymm8);
5183 outputTensorInfo.SetQuantizationScale(1.5f);
5184 outputTensorInfo.SetQuantizationOffset(-3);
5185
5186 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
5187 1, 2, 3, 4,
5188 2, 3, 4, 5,
5189 3, 4, 5, 6,
5190 4, 5, 6, 7
5191 }));
5192
5193 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
5194 result.outputExpected = input;
5195
5196 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5197 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5198
5199 armnn::ResizeBilinearQueueDescriptor descriptor;
5200 armnn::WorkloadInfo info;
5201 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5202 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5203
5204 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5205
5206 inputHandle->Allocate();
5207 outputHandle->Allocate();
5208 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5209
5210 workload->Execute();
5211
5212 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5213 return result;
5214}
5215
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005216LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(
5217 armnn::IWorkloadFactory& workloadFactory,
5218 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005219{
5220 constexpr unsigned int inputWidth = 2;
5221 constexpr unsigned int inputHeight = 2;
5222 constexpr unsigned int inputChannels = 1;
5223 constexpr unsigned int inputBatchSize = 1;
5224
5225 constexpr unsigned int outputWidth = inputWidth / 2;
5226 constexpr unsigned int outputHeight = inputHeight / 2;
5227 constexpr unsigned int outputChannels = inputChannels;
5228 constexpr unsigned int outputBatchSize = inputBatchSize;
5229
5230 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
5231 armnn::DataType::QuantisedAsymm8);
5232 inputTensorInfo.SetQuantizationScale(0.1567f);
5233 inputTensorInfo.SetQuantizationOffset(1);
5234
5235 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
5236 armnn::DataType::QuantisedAsymm8);
5237 outputTensorInfo.SetQuantizationScale(0.1567f);
5238 outputTensorInfo.SetQuantizationOffset(1);
5239
5240 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
5241 1, 255,
5242 200, 250
5243 }));
5244
5245 // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
5246 // then figures out the interpolants and weights. Note this is different to projecting the centre of the
telsoa01c577f2c2018-08-31 09:22:23 +01005247 // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
telsoa014fcda012018-03-09 14:13:49 +00005248 // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
5249 // the centre).
5250 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
5251 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
5252 1
5253 }));
5254
5255 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5256 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5257
5258 armnn::ResizeBilinearQueueDescriptor descriptor;
5259 armnn::WorkloadInfo info;
5260 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5261 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5262
5263 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5264
5265 inputHandle->Allocate();
5266 outputHandle->Allocate();
5267 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5268
5269 workload->Execute();
5270
5271 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5272 return result;
5273}
5274
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005275LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(
5276 armnn::IWorkloadFactory& workloadFactory,
5277 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005278{
5279 constexpr unsigned int inputWidth = 4;
5280 constexpr unsigned int inputHeight = 4;
5281 constexpr unsigned int inputChannels = 1;
5282 constexpr unsigned int inputBatchSize = 1;
5283
5284 constexpr unsigned int outputWidth = inputWidth / 2;
5285 constexpr unsigned int outputHeight = inputHeight / 2;
5286 constexpr unsigned int outputChannels = inputChannels;
5287 constexpr unsigned int outputBatchSize = inputBatchSize;
5288
5289 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
5290 armnn::DataType::QuantisedAsymm8);
5291 inputTensorInfo.SetQuantizationScale(3.141592f);
5292 inputTensorInfo.SetQuantizationOffset(3);
5293
5294 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
5295 armnn::DataType::QuantisedAsymm8);
5296 outputTensorInfo.SetQuantizationScale(3.141592f);
5297 outputTensorInfo.SetQuantizationOffset(3);
5298
5299 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
5300 1, 2, 3, 4,
5301 2, 3, 4, 5,
5302 3, 4, 5, 6,
5303 4, 5, 6, 7
5304 }));
5305
5306 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
5307 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
5308 1, 3,
5309 3, 5
5310 }));
5311
5312 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5313 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5314
5315 armnn::ResizeBilinearQueueDescriptor descriptor;
5316 armnn::WorkloadInfo info;
5317 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5318 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5319
5320 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5321
5322 inputHandle->Allocate();
5323 outputHandle->Allocate();
5324 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5325
5326 workload->Execute();
5327
5328 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5329 return result;
5330}
5331
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005332LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(
5333 armnn::IWorkloadFactory& workloadFactory,
5334 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005335{
5336 constexpr unsigned int inputWidth = 3;
5337 constexpr unsigned int inputHeight = 2;
5338 constexpr unsigned int inputChannels = 1;
5339 constexpr unsigned int inputBatchSize = 1;
5340
5341 constexpr unsigned int outputWidth = 2;
5342 constexpr unsigned int outputHeight = 1;
5343 constexpr unsigned int outputChannels = inputChannels;
5344 constexpr unsigned int outputBatchSize = inputBatchSize;
5345
5346 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
5347 armnn::DataType::QuantisedAsymm8);
5348 inputTensorInfo.SetQuantizationScale(1.5f);
5349 inputTensorInfo.SetQuantizationOffset(-1);
5350
5351 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
5352 armnn::DataType::QuantisedAsymm8);
5353 outputTensorInfo.SetQuantizationScale(1.5f);
5354 outputTensorInfo.SetQuantizationOffset(-1);
5355
5356 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
5357 1, 2, 3, // 3.0, 4.5, 6.0
5358 5, 8, 13 // 9.0, 13.5, 21.0
5359 }));
5360
5361 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
5362 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
5363 1, 3 // 3.0, 5.25
5364 }));
5365
5366 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5367 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5368
5369 armnn::ResizeBilinearQueueDescriptor descriptor;
5370 armnn::WorkloadInfo info;
5371 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5372 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5373
5374 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5375
5376 inputHandle->Allocate();
5377 outputHandle->Allocate();
5378
5379 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5380
5381 workload->Execute();
5382
5383 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5384 return result;
5385}
5386
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005387LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(
5388 armnn::IWorkloadFactory& workloadFactory,
5389 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005390{
5391 constexpr unsigned int inputWidth = 2;
5392 constexpr unsigned int inputHeight = 3;
5393 constexpr unsigned int inputChannels = 1;
5394 constexpr unsigned int inputBatchSize = 1;
5395
5396 constexpr unsigned int outputWidth = 5;
5397 constexpr unsigned int outputHeight = 3;
5398 constexpr unsigned int outputChannels = inputChannels;
5399 constexpr unsigned int outputBatchSize = inputBatchSize;
5400
5401 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
5402 armnn::DataType::QuantisedAsymm8);
5403 inputTensorInfo.SetQuantizationScale(0.010765f);
5404 inputTensorInfo.SetQuantizationOffset(7);
5405
5406 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
5407 armnn::DataType::QuantisedAsymm8);
5408 outputTensorInfo.SetQuantizationScale(0.010132f);
5409 outputTensorInfo.SetQuantizationOffset(-18);
5410
5411 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
5412 24, 228, // 0.183005, 2.379065,
5413 105, 128, // 1.05497, 1.302565
5414 230, 71 // 2.400595, 0.68896
5415 }));
5416
5417 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
5418 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
5419 0, 87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
5420 86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
5421 219, 151, 84, 50, 50 // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
5422 }));
5423
5424 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5425 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5426
5427 armnn::ResizeBilinearQueueDescriptor descriptor;
5428 armnn::WorkloadInfo info;
5429 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5430 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5431
5432 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5433
5434 inputHandle->Allocate();
5435 outputHandle->Allocate();
5436 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5437
5438 workload->Execute();
5439
5440 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5441 return result;
5442}
5443
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005444LayerTestResult<float, 4> BatchNormTest(
5445 armnn::IWorkloadFactory& workloadFactory,
5446 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005447{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01005448 // BatchSize: 1
5449 // Channels: 2
5450 // Height: 3
5451 // Width: 2
5452
5453 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
5454 std::vector<float> inputValues
5455 {
5456 // Batch 0, Channel 0, Height (3) x Width (2)
5457 1.f, 4.f,
5458 4.f, 2.f,
5459 1.f, 6.f,
5460
5461 // Batch 0, Channel 1, Height (3) x Width (2)
5462 1.f, 1.f,
5463 4.f, 1.f,
5464 -2.f, 4.f
5465 };
5466 std::vector<float> expectedOutputValues
5467 {
5468 // Batch 0, Channel 0, Height (3) x Width (2)
5469 1.f, 4.f,
5470 4.f, 2.f,
5471 1.f, 6.f,
5472
5473 // Batch 0, Channel 1, Height (3) x Width (2)
5474 3.f, 3.f,
5475 4.f, 3.f,
5476 2.f, 4.f
5477 };
5478
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005479 return BatchNormTestImpl<float>(workloadFactory, memoryManager,
5480 inputOutputShape, inputValues, expectedOutputValues,
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01005481 0.f, 0, armnn::DataLayout::NCHW);
5482}
5483
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005484LayerTestResult<float, 4> BatchNormNhwcTest(
5485 armnn::IWorkloadFactory& workloadFactory,
5486 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01005487{
5488 // BatchSize: 1
5489 // Height: 3
5490 // Width: 2
5491 // Channels: 2
5492
5493 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
5494 std::vector<float> inputValues
5495 {
5496 // Batch 0, Height 0, Width (2) x Channel (2)
5497 1.f, 1.f,
5498 4.f, 1.f,
5499
5500 // Batch 0, Height 1, Width (2) x Channel (2)
5501 4.f, 4.f,
5502 2.f, 1.f,
5503
5504 // Batch 0, Height 2, Width (2) x Channel (2)
5505 1.f, -2.f,
5506 6.f, 4.f
5507 };
5508 std::vector<float> expectedOutputValues
5509 {
5510 // Batch 0, Height 0, Width (2) x Channel (2)
5511 1.f, 3.f,
5512 4.f, 3.f,
5513
5514 // Batch 0, Height 1, Width (2) x Channel (2)
5515 4.f, 4.f,
5516 2.f, 3.f,
5517
5518 // Batch 0, Height 2, Width (2) x Channel (2)
5519 1.f, 2.f,
5520 6.f, 4.f
5521 };
5522
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005523 return BatchNormTestImpl<float>(workloadFactory, memoryManager,
5524 inputOutputShape, inputValues, expectedOutputValues,
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01005525 0.f, 0, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00005526}
5527
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005528LayerTestResult<uint8_t, 4> BatchNormUint8Test(
5529 armnn::IWorkloadFactory& workloadFactory,
5530 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005531{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01005532 // BatchSize: 1
5533 // Channels: 2
5534 // Height: 3
5535 // Width: 2
5536
5537 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
5538 std::vector<float> inputValues
5539 {
5540 // Batch 0, Channel 0, Height (3) x Width (2)
5541 1.f, 4.f,
5542 4.f, 2.f,
5543 1.f, 6.f,
5544
5545 // Batch 0, Channel 1, Height (3) x Width (2)
5546 1.f, 1.f,
5547 4.f, 1.f,
5548 -2.f, 4.f
5549 };
5550 std::vector<float> expectedOutputValues
5551 {
5552 // Batch 0, Channel 0, Height (3) x Width (2)
5553 1.f, 4.f,
5554 4.f, 2.f,
5555 1.f, 6.f,
5556
5557 // Batch 0, Channel 1, Height (3) x Width (2)
5558 3.f, 3.f,
5559 4.f, 3.f,
5560 2.f, 4.f
5561 };
5562
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005563 return BatchNormTestImpl<uint8_t>(workloadFactory, memoryManager,
5564 inputOutputShape, inputValues, expectedOutputValues,
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01005565 1.f/20.f, 50, armnn::DataLayout::NCHW);
5566}
5567
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005568LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
5569 armnn::IWorkloadFactory& workloadFactory,
5570 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01005571{
5572 // BatchSize: 1
5573 // Height: 3
5574 // Width: 2
5575 // Channels: 2
5576
5577 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
5578 std::vector<float> inputValues
5579 {
5580 // Batch 0, Height 0, Width (2) x Channel (2)
5581 1.f, 1.f,
5582 4.f, 1.f,
5583
5584 // Batch 0, Height 1, Width (2) x Channel (2)
5585 4.f, 4.f,
5586 2.f, 1.f,
5587
5588 // Batch 0, Height 2, Width (2) x Channel (2)
5589 1.f, -2.f,
5590 6.f, 4.f
5591 };
5592 std::vector<float> expectedOutputValues
5593 {
5594 // Batch 0, Height 0, Width (2) x Channel (2)
5595 1.f, 3.f,
5596 4.f, 3.f,
5597
5598 // Batch 0, Height 1, Width (2) x Channel (2)
5599 4.f, 4.f,
5600 2.f, 3.f,
5601
5602 // Batch 0, Height 2, Width (2) x Channel (2)
5603 1.f, 2.f,
5604 6.f, 4.f
5605 };
5606
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005607 return BatchNormTestImpl<uint8_t>(workloadFactory, memoryManager,
5608 inputOutputShape, inputValues, expectedOutputValues,
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01005609 1.f/20.f, 50, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00005610}
5611
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005612LayerTestResult<uint8_t, 4> ConstantUint8Test(
5613 armnn::IWorkloadFactory& workloadFactory,
5614 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005615{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005616 return ConstantTestImpl<uint8_t>(workloadFactory, memoryManager, 2e-6f, 1);
telsoa014fcda012018-03-09 14:13:49 +00005617}
5618
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005619LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
5620 armnn::IWorkloadFactory& workloadFactory,
5621 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005622{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005623 return Concatenation1dTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00005624}
5625
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005626LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
5627 armnn::IWorkloadFactory& workloadFactory,
5628 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005629{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005630 return Concatenation2dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00005631}
5632
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005633LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
5634 armnn::IWorkloadFactory& workloadFactory,
5635 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005636{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005637 return Concatenation2dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00005638}
5639
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005640LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
5641 armnn::IWorkloadFactory& workloadFactory,
5642 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005643{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005644 return Concatenation2dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00005645}
5646
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005647LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
5648 armnn::IWorkloadFactory& workloadFactory,
5649 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005650{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005651 return Concatenation2dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00005652}
5653
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005654LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
5655 armnn::IWorkloadFactory& workloadFactory,
5656 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005657{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005658 return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00005659}
5660
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005661LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
5662 armnn::IWorkloadFactory& workloadFactory,
5663 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005664{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005665 return Concatenation3dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00005666}
5667
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005668LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
5669 armnn::IWorkloadFactory& workloadFactory,
5670 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005671{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005672 return Concatenation3dDim2TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00005673}
5674
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005675LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
5676 armnn::IWorkloadFactory& workloadFactory,
5677 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005678{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005679 return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00005680}
5681
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005682LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
5683 armnn::IWorkloadFactory& workloadFactory,
5684 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005685{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005686 return Concatenation3dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00005687}
5688
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005689LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
5690 armnn::IWorkloadFactory& workloadFactory,
5691 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005692{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005693 return Concatenation3dDim2DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00005694}
5695
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005696LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
5697 armnn::IWorkloadFactory& workloadFactory,
5698 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5699 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00005700{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005701 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<float>(workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00005702}
5703
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005704LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
5705 armnn::IWorkloadFactory& workloadFactory,
5706 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5707 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00005708{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005709 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<uint8_t>(
5710 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00005711}
5712
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005713LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
5714 armnn::IWorkloadFactory& workloadFactory,
5715 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5716 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00005717{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005718 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<float>(workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00005719}
5720
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005721LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
5722 armnn::IWorkloadFactory& workloadFactory,
5723 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5724 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00005725{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005726 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<uint8_t>(
5727 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00005728}
5729
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005730LayerTestResult<float, 4> SimpleMaxPooling2dTest(
5731 armnn::IWorkloadFactory& workloadFactory,
5732 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5733 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005734{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005735 return SimpleMaxPooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00005736}
5737
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005738LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
5739 armnn::IWorkloadFactory& workloadFactory,
5740 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5741 const armnn::DataLayoutIndexed& dataLayout)
Francis Murtagh043d0d02018-10-05 14:08:48 +01005742{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005743 return SimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, dataLayout);
Francis Murtagh043d0d02018-10-05 14:08:48 +01005744}
5745
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005746LayerTestResult<float, 4> SimpleAveragePooling2dTest(
5747 armnn::IWorkloadFactory& workloadFactory,
5748 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5749 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005750{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005751 return SimpleAveragePooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
James Conroy69482272018-10-19 10:41:35 +01005752}
5753
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005754LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
5755 armnn::IWorkloadFactory& workloadFactory,
5756 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5757 const armnn::DataLayoutIndexed& dataLayout)
James Conroy69482272018-10-19 10:41:35 +01005758{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005759 return SimpleAveragePooling2dTestCommon<uint8_t>(
5760 workloadFactory, memoryManager, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00005761}
5762
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005763LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
5764 armnn::IWorkloadFactory& workloadFactory,
5765 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5766 bool forceNoPadding)
surmeh01bceff2f2018-03-29 16:29:27 +01005767{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005768 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<float>(
5769 workloadFactory, memoryManager, forceNoPadding);
surmeh01bceff2f2018-03-29 16:29:27 +01005770}
5771
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005772LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
5773 armnn::IWorkloadFactory& workloadFactory,
5774 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005775{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005776 return LargeTensorsAveragePooling2dTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005777}
5778
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005779LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
5780 armnn::IWorkloadFactory& workloadFactory,
5781 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005782{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005783 return LargeTensorsAveragePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00005784}
5785
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005786LayerTestResult<float, 4> SimpleL2Pooling2dTest(
5787 armnn::IWorkloadFactory& workloadFactory,
5788 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5789 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005790{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005791 return SimpleL2Pooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00005792}
5793
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005794LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
5795 armnn::IWorkloadFactory& workloadFactory,
5796 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5797 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005798{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005799 return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00005800}
5801
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005802LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
5803 armnn::IWorkloadFactory& workloadFactory,
5804 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005805{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005806 return L2Pooling2dSize3Stride1TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005807}
5808
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005809LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
5810 armnn::IWorkloadFactory& workloadFactory,
5811 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005812{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005813 return L2Pooling2dSize3Stride1TestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005814}
5815
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005816LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
5817 armnn::IWorkloadFactory& workloadFactory,
5818 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005819{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005820 return L2Pooling2dSize3Stride3TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005821}
5822
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005823LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
5824 armnn::IWorkloadFactory& workloadFactory,
5825 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005826{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005827 return L2Pooling2dSize3Stride3TestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005828}
5829
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005830LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
5831 armnn::IWorkloadFactory& workloadFactory,
5832 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005833{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005834 return L2Pooling2dSize3Stride4TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005835}
5836
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005837LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
5838 armnn::IWorkloadFactory& workloadFactory,
5839 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005840{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005841 return L2Pooling2dSize3Stride4TestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005842}
5843
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005844LayerTestResult<float, 4> L2Pooling2dSize7Test(
5845 armnn::IWorkloadFactory& workloadFactory,
5846 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005847{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005848 return L2Pooling2dSize7TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005849}
5850
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005851LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
5852 armnn::IWorkloadFactory& workloadFactory,
5853 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005854{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005855 return L2Pooling2dSize7TestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005856}
5857
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005858LayerTestResult<float, 4> L2Pooling2dSize9Test(
5859 armnn::IWorkloadFactory& workloadFactory,
5860 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005861{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005862 return L2Pooling2dSize9TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005863}
5864
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005865LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
5866 armnn::IWorkloadFactory& workloadFactory,
5867 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005868{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005869 return L2Pooling2dSize9TestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005870}
5871
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005872LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
5873 armnn::IWorkloadFactory& workloadFactory,
5874 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005875{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005876 return AsymmetricNonSquarePooling2dTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005877}
5878
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005879LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
5880 armnn::IWorkloadFactory& workloadFactory,
5881 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005882{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005883 return AsymmetricNonSquarePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005884}
5885
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005886LayerTestResult<float, 4> ComparePooling2dTest(
5887 armnn::IWorkloadFactory& workloadFactory,
5888 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5889 armnn::IWorkloadFactory& refWorkloadFactory,
5890 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00005891{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005892 return ComparePooling2dTestCommon<float>(
5893 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
telsoa014fcda012018-03-09 14:13:49 +00005894}
5895
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005896LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
5897 armnn::IWorkloadFactory& workloadFactory,
5898 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5899 armnn::IWorkloadFactory& refWorkloadFactory,
5900 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00005901{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005902 return ComparePooling2dTestCommon<uint8_t>(
5903 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00005904}
5905
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005906LayerTestResult<float, 2> FullyConnectedLargeTest(
5907 armnn::IWorkloadFactory& workloadFactory,
5908 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5909 bool transposeWeights)
telsoa014fcda012018-03-09 14:13:49 +00005910{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005911 return FullyConnectedLargeTestCommon<float>(workloadFactory, memoryManager, transposeWeights);
telsoa014fcda012018-03-09 14:13:49 +00005912}
5913
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005914LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
5915 armnn::IWorkloadFactory& workloadFactory,
5916 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005917{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005918 return IgnorePaddingSimpleMaxPooling2dTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005919}
5920
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005921LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
5922 armnn::IWorkloadFactory& workloadFactory,
5923 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005924{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005925 return IgnorePaddingSimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00005926}
5927
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005928LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
5929 armnn::IWorkloadFactory& workloadFactory,
5930 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005931{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005932 return IgnorePaddingMaxPooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005933}
5934
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005935LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
5936 armnn::IWorkloadFactory& workloadFactory,
5937 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005938{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005939 return IgnorePaddingMaxPooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00005940}
5941
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005942LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
5943 armnn::IWorkloadFactory& workloadFactory,
5944 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005945{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005946 return IgnorePaddingSimpleAveragePooling2dTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005947}
5948
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005949LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
5950 armnn::IWorkloadFactory& workloadFactory,
5951 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005952{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005953 return IgnorePaddingSimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005954}
5955
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005956LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
5957 armnn::IWorkloadFactory& workloadFactory,
5958 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005959{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005960 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005961}
5962
5963LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005964 armnn::IWorkloadFactory& workloadFactory,
5965 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005966{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005967 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005968}
5969
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005970LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
5971 armnn::IWorkloadFactory& workloadFactory,
5972 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005973{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005974 return IgnorePaddingAveragePooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005975}
5976
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005977LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
5978 armnn::IWorkloadFactory& workloadFactory,
5979 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005980{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005981 return IgnorePaddingAveragePooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005982}
5983
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005984LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
5985 armnn::IWorkloadFactory& workloadFactory,
5986 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005987{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005988 return IgnorePaddingSimpleL2Pooling2dTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005989}
5990
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005991LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
5992 armnn::IWorkloadFactory& workloadFactory,
5993 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005994{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005995 return IgnorePaddingSimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005996}
5997
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005998LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
5999 armnn::IWorkloadFactory& workloadFactory,
6000 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006001{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006002 return IgnorePaddingL2Pooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006003}
6004
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006005LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
6006 armnn::IWorkloadFactory& workloadFactory,
6007 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006008{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006009 return IgnorePaddingL2Pooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006010}
6011
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006012LayerTestResult<float, 4> SimplePermuteFloat32Test(
6013 armnn::IWorkloadFactory& workloadFactory,
6014 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006015{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006016 return SimplePermuteFloat32TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006017};
6018
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006019LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(
6020 armnn::IWorkloadFactory& workloadFactory,
6021 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006022{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006023 return SimplePermuteUint8TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006024};
surmeh01bceff2f2018-03-29 16:29:27 +01006025
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006026LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(
6027 armnn::IWorkloadFactory& workloadFactory,
6028 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01006029{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006030 return PermuteFloat32ValueSet1TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01006031};
6032
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006033LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(
6034 armnn::IWorkloadFactory& workloadFactory,
6035 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01006036{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006037 return PermuteFloat32ValueSet2TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01006038};
6039
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006040LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(
6041 armnn::IWorkloadFactory& workloadFactory,
6042 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01006043{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006044 return PermuteFloat32ValueSet3TestCommon(workloadFactory, memoryManager);
narpra011e4c31d2018-09-28 11:07:51 +01006045};
6046
namespace
{

// Builds, runs and checks a single Mean workload on the given backend.
//
// T          - element type; uint8_t selects QuantisedAsymm8, anything else Float32.
// InputDim   - rank of the input tensor.
// OutputDim  - rank of the output tensor (smaller than InputDim unless keepDims is set).
//
// inputShape/outputShape - raw dimension arrays of length InputDim/OutputDim.
// inputData/outputData   - flattened input values and expected output values.
// axis                   - dimensions to reduce over; empty means reduce over all.
// keepDims               - whether reduced dimensions are kept with size 1.
// scale/offset           - quantisation parameters applied to BOTH input and output.
//
// Returns a LayerTestResult holding the actual and expected output tensors.
//
// NOTE(review): memoryManager is accepted but never used in this helper -
// presumably kept so all test helpers share a uniform signature; confirm
// whether backends that need a memory manager should receive it here.
template <typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> MeanTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int* inputShape,
    const std::vector<T>& inputData,
    const std::vector<unsigned int>& axis,
    bool keepDims,
    const unsigned int* outputShape,
    const std::vector<T>& outputData,
    float scale = 1.0f,
    int32_t offset = 0)
{
    // Data type follows the element type: uint8_t -> quantised, otherwise float.
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    // The same quantisation parameters are used on input and output, so quantised
    // values can be compared directly.
    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the Mean operation (reduction axes + keep-dims flag) and wire the
    // tensor handles into the workload descriptor.
    armnn::MeanQueueDescriptor data;
    data.m_Parameters.m_Axis = axis;
    data.m_Parameters.m_KeepDims = keepDims;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);

    // Handles must be allocated before any data is copied in or out.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workload->Execute();

    // Read back the computed output into the result structure for comparison.
    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());

    return result;
}

} // anonymous namespace
6104
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006105LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(
6106 armnn::IWorkloadFactory& workloadFactory,
6107 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01006108{
6109 const unsigned int inputShape[] = { 3, 2 };
6110 const unsigned int outputShape[] = { 1 };
6111
6112 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
6113 std::vector<uint8_t> output({ 2 });
6114
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006115 return MeanTestHelper<uint8_t, 2, 1>(
6116 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01006117}
6118
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006119LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(
6120 armnn::IWorkloadFactory& workloadFactory,
6121 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01006122{
6123 const unsigned int inputShape[] = { 1, 1, 3, 2 };
6124 const unsigned int outputShape[] = { 1, 1, 2 };
6125
6126 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
6127 std::vector<uint8_t> output({ 2, 2 });
6128
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006129 return MeanTestHelper<uint8_t, 4, 3>(
6130 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01006131}
6132
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006133LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(
6134 armnn::IWorkloadFactory& workloadFactory,
6135 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01006136{
6137 const unsigned int inputShape[] = { 1, 1, 3, 2 };
6138 const unsigned int outputShape[] = { 1, 1, 1, 2 };
6139
6140 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
6141 std::vector<uint8_t> output({ 2, 2 });
6142
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006143 return MeanTestHelper<uint8_t, 4, 4>(
6144 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01006145}
6146
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006147LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(
6148 armnn::IWorkloadFactory& workloadFactory,
6149 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01006150{
6151 const unsigned int inputShape[] = { 2, 3, 1, 2 };
6152 const unsigned int outputShape[] = { 1, 3, 1, 1 };
6153
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006154 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
narpra011e4c31d2018-09-28 11:07:51 +01006155 std::vector<uint8_t> output({ 1, 3, 5 });
6156
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006157 return MeanTestHelper<uint8_t, 4, 4>(
6158 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01006159}
6160
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006161LayerTestResult<uint8_t, 1> MeanVtsUint8Test(
6162 armnn::IWorkloadFactory& workloadFactory,
6163 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01006164{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006165 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01006166 const unsigned int outputShape[] = { 2 };
6167
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006168 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
6169 24 });
6170 std::vector<uint8_t> output({ 12, 13 });
narpra011e4c31d2018-09-28 11:07:51 +01006171
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006172 return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, memoryManager,
6173 inputShape, input, { 0, 1 }, false, outputShape,
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006174 output, 0.8f, 5);
narpra011e4c31d2018-09-28 11:07:51 +01006175}
6176
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006177LayerTestResult<float, 1> MeanFloatSimpleTest(
6178 armnn::IWorkloadFactory& workloadFactory,
6179 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01006180{
6181 const unsigned int inputShape[] = { 3, 2 };
6182 const unsigned int outputShape[] = { 1 };
6183
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006184 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
6185 std::vector<float> output({ 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01006186
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006187 return MeanTestHelper<float, 2, 1>(
6188 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01006189}
6190
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006191LayerTestResult<float, 3> MeanFloatSimpleAxisTest(
6192 armnn::IWorkloadFactory& workloadFactory,
6193 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01006194{
6195 const unsigned int inputShape[] = { 2, 3, 1, 2 };
6196 const unsigned int outputShape[] = { 3, 1, 2 };
6197
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006198 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
6199 std::vector<float> output({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
narpra011e4c31d2018-09-28 11:07:51 +01006200
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006201 return MeanTestHelper<float, 4, 3>(
6202 workloadFactory, memoryManager, inputShape, input, { 0 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01006203}
6204
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006205LayerTestResult<float, 4> MeanFloatKeepDimsTest(
6206 armnn::IWorkloadFactory& workloadFactory,
6207 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01006208{
6209 const unsigned int inputShape[] = { 1, 1, 3, 2 };
6210 const unsigned int outputShape[] = { 1, 1, 1, 2 };
6211
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006212 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
6213 std::vector<float> output({ 2.0f, 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01006214
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006215 return MeanTestHelper<float, 4, 4>(
6216 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01006217}
6218
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006219LayerTestResult<float, 4> MeanFloatMultipleDimsTest(
6220 armnn::IWorkloadFactory& workloadFactory,
6221 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01006222{
6223 const unsigned int inputShape[] = { 2, 3, 1, 2 };
6224 const unsigned int outputShape[] = { 1, 3, 1, 1 };
6225
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006226 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
6227 std::vector<float> output({ 1.5f, 3.5f, 5.5f });
narpra011e4c31d2018-09-28 11:07:51 +01006228
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006229 return MeanTestHelper<float, 4, 4>(
6230 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01006231}
6232
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006233LayerTestResult<float, 1> MeanVtsFloat1Test(
6234 armnn::IWorkloadFactory& workloadFactory,
6235 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01006236{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006237 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01006238 const unsigned int outputShape[] = { 2 };
6239
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006240 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
6241 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
6242 std::vector<float> output({ 12.0f, 13.0f });
narpra011e4c31d2018-09-28 11:07:51 +01006243
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006244 return MeanTestHelper<float, 3, 1>(
6245 workloadFactory, memoryManager, inputShape, input, { 0, 1 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01006246}
6247
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006248LayerTestResult<float, 3> MeanVtsFloat2Test(
6249 armnn::IWorkloadFactory& workloadFactory,
6250 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01006251{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006252 const unsigned int inputShape[] = { 4, 3, 2 };
6253 const unsigned int outputShape[] = { 1, 3, 1 };
narpra011e4c31d2018-09-28 11:07:51 +01006254
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006255 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
6256 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
6257 std::vector<float> output({ 10.5f, 12.5f, 14.5f });
narpra011e4c31d2018-09-28 11:07:51 +01006258
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006259 return MeanTestHelper<float, 3, 3>(
6260 workloadFactory, memoryManager, inputShape, input, { 0, 2 }, true, outputShape, output);
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006261}
6262
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006263LayerTestResult<float, 3> MeanVtsFloat3Test(
6264 armnn::IWorkloadFactory& workloadFactory,
6265 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006266{
6267 const unsigned int inputShape[] = { 1, 2, 2, 1 };
6268 const unsigned int outputShape[] = { 1, 2, 1 };
6269
6270 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f });
6271 std::vector<float> output({ 1.5f, 3.5f });
6272
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006273 return MeanTestHelper<float, 4, 3>(
6274 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01006275}
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01006276
// Runs a 1x1 max-pool (stride 2x2) over a 3x3 input and then adds a second tensor
// to the pooled result, checking the combined pipeline end-to-end:
//   maxpool([1..9]) -> {1,3 / 7,9};  {1,3 / 7,9} + {12,16 / 24,28} -> {13,19 / 31,37}.
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Create Initial Tensor
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::GetDataType<float>());
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::GetDataType<float>());

    boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
                                                                    {1, 2, 3,
                                                                     4, 5, 6,
                                                                     7, 8, 9
                                                                    });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool poolSize = 1x1, stride=2x2
    // Result =
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    // Scratch buffer shaped like the pooling output (see the copy dance below).
    //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
    auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);


    // Create addition with another tensor the same size
    // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
    // with the initial tensor.
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::GetDataType<float>());
    armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::GetDataType<float>());

    boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
                                                                {12, 16,
                                                                 24, 28,
                                                                });

    // Expected output tensor after MaxPool and Addition.
    LayerTestResult<float,4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
            {
                13, 19,
                31, 37
            }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor
    // The addition workload reads the pooling output handle directly as its first input.
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    // All handles must be allocated before any host<->handle copies.
    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    // NOTE(review): the next two copies round-trip the (not-yet-computed) pooling
    // output through resultMaxPool BEFORE either workload has executed, so they
    // appear to be a no-op; the actual pooling result is produced by
    // workload->Execute() below. Confirm whether this round-trip is needed by any
    // backend before removing it.
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    // Execute the pooling first, then the addition that consumes its output handle.
    workload->Execute();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006379
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006380LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
6381 armnn::IWorkloadFactory& workloadFactory,
6382 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006383{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006384 return SpaceToBatchNdSimpleTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006385}
6386
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006387LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
6388 armnn::IWorkloadFactory& workloadFactory,
6389 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006390{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006391 return SpaceToBatchNdMultiChannelsTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006392}
6393
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006394LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
6395 armnn::IWorkloadFactory& workloadFactory,
6396 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006397{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006398 return SpaceToBatchNdMultiBlockTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006399}
6400
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006401LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
6402 armnn::IWorkloadFactory& workloadFactory,
6403 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006404{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006405 return SpaceToBatchNdPaddingTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006406}
6407
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006408LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
6409 armnn::IWorkloadFactory& workloadFactory,
6410 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006411{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006412 return SpaceToBatchNdSimpleTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006413}
6414
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006415LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
6416 armnn::IWorkloadFactory& workloadFactory,
6417 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006418{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006419 return SpaceToBatchNdMultiChannelsTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006420}
6421
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006422LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
6423 armnn::IWorkloadFactory& workloadFactory,
6424 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006425{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006426 return SpaceToBatchNdMultiBlockTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006427}
6428
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006429LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
6430 armnn::IWorkloadFactory& workloadFactory,
6431 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006432{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006433 return SpaceToBatchNdPaddingTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006434}
6435
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006436LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
6437 armnn::IWorkloadFactory& workloadFactory,
6438 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006439{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006440 return SpaceToBatchNdSimpleNHWCTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006441}
6442
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006443LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
6444 armnn::IWorkloadFactory& workloadFactory,
6445 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006446{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006447 return SpaceToBatchNdMultiChannelsNHWCTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006448}
6449
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006450LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
6451 armnn::IWorkloadFactory& workloadFactory,
6452 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006453{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006454 return SpaceToBatchNdMultiBlockNHWCTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006455}
6456
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006457LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
6458 armnn::IWorkloadFactory& workloadFactory,
6459 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006460{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006461 return SpaceToBatchNdPaddingNHWCTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006462}
6463
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006464LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
6465 armnn::IWorkloadFactory& workloadFactory,
6466 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006467{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006468 return SpaceToBatchNdSimpleNHWCTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006469}
6470
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006471LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
6472 armnn::IWorkloadFactory& workloadFactory,
6473 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006474{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006475 return SpaceToBatchNdMultiChannelsNHWCTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006476}
6477
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006478LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
6479 armnn::IWorkloadFactory& workloadFactory,
6480 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006481{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006482 return SpaceToBatchNdMultiBlockNHWCTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006483}
6484
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006485LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
6486 armnn::IWorkloadFactory& workloadFactory,
6487 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006488{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006489 return SpaceToBatchNdPaddingNHWCTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006490}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006491
6492namespace {
6493
6494template<typename T, std::size_t InputDim, std::size_t OutputDim>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006495LayerTestResult<T, OutputDim> BatchToSpaceNdHelper(
6496 armnn::IWorkloadFactory &workloadFactory,
6497 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6498 const armnn::DataLayout& dataLayout,
6499 const unsigned int *inputShape,
6500 const std::vector<T> &inputData,
6501 const std::vector<unsigned int> &blockShape,
6502 const std::vector<std::pair<unsigned int, unsigned int>> &crops,
6503 const unsigned int *outputShape,
6504 const std::vector<T> &outputData,
6505 float scale = 1.0f,
6506 int32_t offset = 0)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006507 {
6508 auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);
6509
6510 armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
6511 armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);
6512
6513 inputTensorInfo.SetQuantizationScale(scale);
6514 inputTensorInfo.SetQuantizationOffset(offset);
6515
6516 outputTensorInfo.SetQuantizationScale(scale);
6517 outputTensorInfo.SetQuantizationOffset(offset);
6518
6519 auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);
6520
6521 LayerTestResult<T, OutputDim> result(outputTensorInfo);
6522 result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);
6523
6524 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6525 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6526
6527 armnn::BatchToSpaceNdQueueDescriptor data;
6528 data.m_Parameters.m_DataLayout = dataLayout;
6529 data.m_Parameters.m_BlockShape = blockShape;
6530 data.m_Parameters.m_Crops = crops;
6531 armnn::WorkloadInfo info;
6532 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
6533 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
6534
6535 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchToSpaceNd(data, info);
6536
6537 inputHandle->Allocate();
6538 outputHandle->Allocate();
6539
6540 CopyDataToITensorHandle(inputHandle.get(), input.origin());
6541
6542 workload->Execute();
6543
6544 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6545
6546 return result;
6547}
6548
6549} // anonymous namespace
6550
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006551LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test1(
6552 armnn::IWorkloadFactory& workloadFactory,
6553 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006554{
6555 const unsigned int inputShape[] = {4, 2, 2, 1};
6556 const unsigned int outputShape[] = {1, 4, 4, 1 };
6557
6558 std::vector<float> input
6559 ({
6560 // Batch 0, Height 0, Width (2) x Channel (1)
6561 1.0f, 3.0f,
6562 // Batch 0, Height 1, Width (2) x Channel (1)
6563 9.0f, 11.0f,
6564
6565
6566 // Batch 1, Height 0, Width (2) x Channel (1)
6567 2.0f, 4.0f,
6568 // Batch 1, Height 1, Width (2) x Channel (1)
6569 10.0f, 12.0f,
6570
6571
6572 // Batch 2, Height 0, Width (2) x Channel (1)
6573 5.0f, 7.0f,
6574 // Batch 2, Height 1, Width (2) x Channel (1)
6575 13.0f, 15.0f,
6576
6577 // Batch 3, Height 0, Width (2) x Channel (3)
6578 6.0f, 8.0f,
6579 // Batch 3, Height 1, Width (2) x Channel (1)
6580 14.0f, 16.0f
6581 });
6582
6583 std::vector<float> expectedOutput
6584 ({
6585 1.0f, 2.0f, 3.0f, 4.0f,
6586 5.0f, 6.0f, 7.0f, 8.0f,
6587 9.0f, 10.0f, 11.0f, 12.0f,
6588 13.0f, 14.0f, 15.0f, 16.0f
6589 });
6590
6591 std::vector<unsigned int> blockShape {2, 2};
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00006592 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006593
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006594 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
6595 armnn::DataLayout::NHWC, inputShape, input, blockShape,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006596 crops, outputShape, expectedOutput);
6597}
6598
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006599LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test2(
6600 armnn::IWorkloadFactory& workloadFactory,
6601 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006602{
6603 const unsigned int inputShape[] = {4, 1, 1, 1};
6604 const unsigned int outputShape[] = {1, 2, 2, 1};
6605
6606 std::vector<float> input
6607 ({
6608 // Batch 0, Height 0, Width (2) x Channel (1)
6609 1.0f, 2.0f, 3.0f, 4.0f
6610 });
6611
6612 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});
6613
6614 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00006615 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006616
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006617 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
6618 armnn::DataLayout::NHWC, inputShape, input, blockShape,
6619 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006620}
6621
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006622LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test3(
6623 armnn::IWorkloadFactory& workloadFactory,
6624 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006625{
6626 const unsigned int inputShape[] = {4, 1, 1, 3};
6627 const unsigned int outputShape[] = {1, 2, 2, 3};
6628
6629 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });
6630
6631 std::vector<float> expectedOutput({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });
6632
6633 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00006634 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006635
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006636 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
6637 armnn::DataLayout::NHWC, inputShape, input, blockShape,
6638 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006639}
6640
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006641LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test1(
6642 armnn::IWorkloadFactory &workloadFactory,
6643 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006644{
6645 const unsigned int inputShape[] = {4, 3, 1, 1};
6646 const unsigned int outputShape[] = {1, 3, 2, 2};
6647
6648 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });
6649
6650 std::vector<float> expectedOutput
6651 ({
6652 // Batch 0, Channel 0, Height (2) x Width (2)
6653 1.0f, 4.0f,
6654 7.0f, 10.0f,
6655
6656 // Batch 0, Channel 1, Height (2) x Width (2)
6657 2.0f, 5.0f,
6658 8.0f, 11.0f,
6659
6660 // Batch 0, Channel 2, Height (2) x Width (2)
6661 3.0f, 6.0f,
6662 9.0f, 12.0f,
6663 });
6664
6665 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00006666 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006667
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006668 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
6669 armnn::DataLayout::NCHW, inputShape, input, blockShape,
6670 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006671}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00006672
6673
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006674LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest1(
6675 armnn::IWorkloadFactory& workloadFactory,
6676 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00006677{
6678 const unsigned int inputShape[] = {4, 2, 2, 1};
6679 const unsigned int outputShape[] = {1, 4, 4, 1};
6680
6681 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 });
6682 std::vector<uint8_t> expectedOutput({ 1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16});
6683
6684 std::vector<unsigned int> blockShape({2, 2});
6685 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
6686
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006687 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
6688 armnn::DataLayout::NHWC, inputShape, input, blockShape,
6689 crops, outputShape, expectedOutput);
6690}