blob: bd8b38da01dcd4e342253232a10f36bf7e3bb2e6 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
telsoa014fcda012018-03-09 14:13:49 +00007
8#include "test/TensorHelpers.hpp"
9#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010010#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000011
12#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010013#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000014
David Beck711fa312018-09-24 10:46:38 +010015#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000017#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000018#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000019#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000020
telsoa014fcda012018-03-09 14:13:49 +000021#include <algorithm>
22#include <boost/cast.hpp>
23
24#include "WorkloadTestUtils.hpp"
25#include "Conv2dTestImpl.hpp"
26#include "BatchNormTestImpl.hpp"
27#include "ActivationTestImpl.hpp"
28#include "Pooling2dTestImpl.hpp"
29#include "ReshapeTestImpl.hpp"
30#include "FullyConnectedTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000031#include "SpaceToBatchNdTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000032#include "SplitterTestImpl.hpp"
33#include "SoftmaxTestImpl.hpp"
34#include "NormTestImpl.hpp"
35#include "PermuteTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010036#include "LstmTestImpl.hpp"
37#include "ConvertFp16ToFp32TestImpl.hpp"
38#include "ConvertFp32ToFp16TestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000039
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// Layout is NCHW: 3 channels of 8 rows x 16 columns each.
//   Channel 0: constant 0.5f, except the second row which is all zeros.
//   Channel 1: all zeros, except a vertical stripe of 1s in column 2.
//   Channel 2: constant -1.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});
67
// 2-channel bias used by a number of Conv2d tests: one bias value per output channel
// (0 for the first channel, 2 for the second).
static std::vector<float> Bias2({0, 2});
70
jimfly013aab7c32018-11-12 13:32:08 +000071armnn::TensorShape GetTestTensorShape(unsigned int numberOfBatches,
72 unsigned int numberOfChannels,
73 unsigned int height,
74 unsigned int width,
75 const armnn::DataLayoutIndexed& dataLayout)
76{
77 switch (dataLayout.GetDataLayout())
78 {
79 case armnn::DataLayout::NCHW:
80 return armnn::TensorShape({numberOfBatches, numberOfChannels, height, width});
81 case armnn::DataLayout::NHWC:
82 return armnn::TensorShape({numberOfBatches, height, width, numberOfChannels});
83 default:
84 throw armnn::InvalidArgumentException("unknown data layout ["
85 + std::to_string(static_cast<int>(dataLayout.GetDataLayout())) + "]");
86 }
87}
88
telsoa01c577f2c2018-08-31 09:22:23 +010089// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
telsoa014fcda012018-03-09 14:13:49 +000090template<typename T>
91boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
92{
93 if(biasEnabled)
94 {
95 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, armnn::GetDataType<T>());
96 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
97 return bias;
98 }
99 else
100 {
101 return boost::multi_array<T, 1>();
102 }
103}
104
// Runs a 2D convolution of the common 1x3x8x16 input against a 2-filter batch of
// 3-channel 3x5 kernels, checking against a hand-computed expected output.
// qScale/qOffset quantize input, kernel, bias and expected output alike.
template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayoutIndexed& layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Filter 0, channel 0.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Filter 0, channel 1.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Filter 0, channel 2.
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,

            // Filter 1, channel 0.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Filter 1, channel 1.
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Filter 1, channel 2.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 2 batch elements of a 1-channel 14x4 image.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
      memoryManager,
      input,
      kernel,
      GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
      expectedOutput,
      qScale,
      qOffset,
      layout);
}
187
// Same scenario as SimpleConvolution2d3x5TestCommon but with 3x3 kernels, which
// exercises ArmCompute's direct convolution path.
template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayoutIndexed& layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Filter 0, channel 0.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            // Filter 0, channel 1.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Filter 0, channel 2.
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,

            // Filter 1, channel 0.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Filter 1, channel 1.
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Filter 1, channel 2.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
      memoryManager,
      input,
      kernel,
      GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
      expectedOutput,
      qScale,
      qOffset,
      layout);
}
262
// Runs an unbiased 3x3 convolution over a single-channel 3x4 (HxW) image in the
// given (NHWC) data layout. Note: qScale/qOffset are forwarded to the impl but the
// tensors here are built without quantizing the literal data.
template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Use a common single-batch, single-channel 3x4 image (NHWC: {1, 3, 4, 1}).

    armnn::TensorInfo inputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
    {
        1, 5, 2, 3,
        8, 7, 3, 6,
        3, 3, 9, 1
    });


    // Use a single 3x3 single-channel kernel (NHWC: {1, 3, 3, 1}).
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
        4, 5, 6,
        0, 0, 0,
        3, 2, 1
    });

    // Expected output is 1 batch of a single-channel 3x4 image.
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());

    const std::vector<float> outputData =
    {
        23, 41, 33, 21,
        44, 65, 76, 52,
        82, 85, 79, 42
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    // No bias tensor: this test always runs without bias (biasEnabled is unused here).
    return SimpleConvolution2dNhwcTestImpl<T>(workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(),
        expectedOutput,
        dataLayout,
        qScale,
        qOffset);
}
313
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000314LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
315 armnn::IWorkloadFactory& workloadFactory,
316 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
317 bool biasEnabled,
318 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +0000319{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000320 return SimpleConvolution2d3x5TestCommon<float>(workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000321}
322
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000323LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
324 armnn::IWorkloadFactory& workloadFactory,
325 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
326 bool biasEnabled,
327 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +0000328{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000329 return SimpleConvolution2d3x5TestCommon<uint8_t>(workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000330}
331
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000332LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
333 armnn::IWorkloadFactory& workloadFactory,
334 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
335 bool biasEnabled,
336 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +0000337{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000338 return SimpleConvolution2d3x3TestCommon<float>(workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000339}
340
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000341LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
342 armnn::IWorkloadFactory& workloadFactory,
343 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
344 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100345{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000346 return SimpleConvolution2d3x3NhwcTestCommon<float>(workloadFactory,
347 memoryManager,
348 0.f,
349 0,
350 biasEnabled,
351 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100352}
353
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000354LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
355 armnn::IWorkloadFactory& workloadFactory,
356 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
357 bool biasEnabled,
358 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +0000359{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000360 return SimpleConvolution2d3x3TestCommon<uint8_t>(workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000361}
362
// Convolves a 1x1x3x3 input with a 1x1x2x2 kernel using asymmetric padding
// (left 1, top 2, right 3, bottom 4) that is larger than half the kernel size,
// always without bias.
template<typename T>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayoutIndexed& layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,
            12,22,32,
            13,23,33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,
            -12,-22,
        })));

// Expected output is 1 batch of a 1-channel 8x6 (HxW) image.
// Manually calculated like this:
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            0, 0, 0, 0, 0, 0,
            -242, -594, -934, -372, 0, 0,
            -495, -1190, -1850, -725, 0, 0,
            -538, -1256, -1916, -748, 0, 0,
            -273, -626, -946, -363, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        })));

    // Bias is always disabled for this test (GetBias2 called with false).
    return SimpleConvolution2dTestImpl<T>(workloadFactory,
      memoryManager,
      input,
      kernel,
      GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
      expectedOutput,
      qScale,
      qOffset,
      layout,
      1,  // Padding left.
      2,  // Padding top.
      3,  // Padding right.
      4); // Padding bottom.
}
424
425template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000426LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
427 armnn::IWorkloadFactory& workloadFactory,
428 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
429 const armnn::DataLayoutIndexed& layout,
430 float qScale,
431 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000432{
telsoa01c577f2c2018-08-31 09:22:23 +0100433 // Use a single-batch 1-channel 5x5 image as input.
telsoa014fcda012018-03-09 14:13:49 +0000434 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
435 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
436 QuantizedVector<T>(qScale, qOffset, {
437 11,21,31,41,51,
438 12,22,32,42,52,
439 13,23,33,43,53,
440 14,24,34,44,54,
441 15,25,35,45,55,
442 })));
443
telsoa01c577f2c2018-08-31 09:22:23 +0100444 // Use 1 batch of a 1-channel 4x4 kernel.
telsoa014fcda012018-03-09 14:13:49 +0000445 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
446 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
447 QuantizedVector<T>(qScale, qOffset, {
448 -11,-21,-31,-41,
449 -12,-22,-32,-42,
450 -13,-23,-33,-43,
451 -14,-24,-34,-44,
452 })));
453
telsoa01c577f2c2018-08-31 09:22:23 +0100454 // Expected output is 1 batch of a 1-channel 5x5 image.
telsoa014fcda012018-03-09 14:13:49 +0000455 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
456 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
457 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
458 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000459 -7140, -10580, -13940, -9300, -5230,
460 -9590, -14120, -18520, -12290, -6860,
461 -9980, -14560, -18960, -12560, -7000,
462 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100463 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000464 })));
465
466 return SimpleConvolution2dTestImpl<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000467 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000468 input,
469 kernel,
470 GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
471 expectedOutput,
472 qScale,
473 qOffset,
narpra015f703182018-10-26 16:24:58 +0100474 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100475 1, // Padding left.
476 1, // Padding top.
477 2, // Padding right.
478 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100479}
480
481template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000482LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
483 armnn::IWorkloadFactory& workloadFactory,
484 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
485 float qScale,
486 int32_t qOffset,
487 bool biasEnabled,
488 const armnn::DataLayoutIndexed& layout)
surmeh013537c2c2018-05-18 16:31:43 +0100489{
telsoa01c577f2c2018-08-31 09:22:23 +0100490 // Use a single-batch 2-channel 5x5 image as input.
surmeh013537c2c2018-05-18 16:31:43 +0100491 armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
492 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
493 QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
494 0, 1, 2, 3, 4,
495 5, 6, 7, 8, 9,
496 10, 11, 12, 13, 14,
497 15, 16, 17, 18, 19,
498 20, 21, 22, 23, 24,
499
500 25, 26, 27, 28, 29,
501 30, 31, 32, 33, 34,
502 35, 36, 37, 38, 39,
503 40, 41, 42, 43, 44,
504 45, 46, 47, 48, 49
505 })));
506
telsoa01c577f2c2018-08-31 09:22:23 +0100507 // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
surmeh013537c2c2018-05-18 16:31:43 +0100508 armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
509 auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
510 QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
511 32, 31, 30, 29,
512 28, 27, 26, 25,
513 24, 23, 22, 21,
514 20, 19, 18, 17,
515
516 16, 15, 14, 13,
517 12, 11, 10, 9,
518 8, 7, 6, 5,
519 4, 3, 2, 1
520 })));
521
telsoa01c577f2c2018-08-31 09:22:23 +0100522 // Expected output is 1 batch of a 2-channel 5x5 image.
523 // Calculated using the python tensorflow library with strideX=1, strideY=1.
surmeh013537c2c2018-05-18 16:31:43 +0100524 armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
525 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
526 QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
527 1062, 1580, 1850, 1530, 1117,
528 2140, 3108, 3500, 2842, 2042,
529 3580, 5068, 5460, 4342, 3062,
530 3618, 5072, 5390, 4248, 2971,
531 3074, 4282, 4510, 3533, 2457,
532 1550, 2284, 2362, 1955, 1428,
533 2910, 4206, 4342, 3528, 2536,
534 3390, 4886, 5022, 4068, 2916,
535 3566, 5056, 5182, 4133, 2922,
536 3100, 4352, 4452, 3517, 2465
537 })));
538
539 return DepthwiseConvolution2dAsymmetricTestImpl<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000540 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +0100541 input,
542 kernel,
543 GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
544 expectedOutput,
545 qScale,
546 qOffset,
jimfly01382a91d2018-10-26 15:55:50 +0100547 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100548 1, // Padding left.
549 1, // Padding top.
550 2, // Padding right.
551 2, // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100552 1, // strideX
553 1); // strideY
telsoa014fcda012018-03-09 14:13:49 +0000554}
555
Nikhil Rajcec6b652018-10-12 13:51:57 +0100556template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000557LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
558 armnn::IWorkloadFactory& workloadFactory,
559 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
560 float qScale,
561 int32_t qOffset,
562 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +0100563{
564 armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
565 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
566 QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
567 0, 25,
568 1, 26,
569 2, 27,
570 3, 28,
571 4, 29,
572
573 5, 30,
574 6, 31,
575 7, 32,
576 8, 33,
577 9, 34,
578
579 10, 35,
580 11, 36,
581 12, 37,
582 13, 38,
583 14, 39,
584
585 15, 40,
586 16, 41,
587 17, 42,
588 18, 43,
589 19, 44,
590
591 20, 45,
592 21, 46,
593 22, 47,
594 23, 48,
595 24, 49
596 })));
597
598 armnn::TensorInfo kernelTensorInfo({ 1, 4, 4, 2}, armnn::GetDataType<T>());
599 auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
600 QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
601 32, 16,
602 31, 15,
603 30, 14,
604 29, 13,
605
606 28, 12,
607 27, 11,
608 26, 10,
609 25, 9,
610
611 24, 8,
612 23, 7,
613 22, 6,
614 21, 5,
615
616 20, 4,
617 19, 3,
618 18, 2,
619 17, 1
620 })));
621
622 armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
623 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
624 QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
625 1062, 1550,
626 1580, 2284,
627 1850, 2362,
628 1530, 1955,
629 1117, 1428,
630
631 2140, 2910,
632 3108, 4206,
633 3500, 4342,
634 2842, 3528,
635 2042, 2536,
636
637 3580, 3390,
638 5068, 4886,
639 5460, 5022,
640 4342, 4068,
641 3062, 2916,
642
643 3618, 3566,
644 5072, 5056,
645 5390, 5182,
646 4248, 4133,
647 2971, 2922,
648
649 3074, 3100,
650 4282, 4352,
651 4510, 4452,
652 3533, 3517,
653 2457, 2465
654 })));
655
656 return DepthwiseConvolution2dNhwcTestImpl<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000657 memoryManager,
Nikhil Rajcec6b652018-10-12 13:51:57 +0100658 input,
659 kernel,
660 GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
661 expectedOutput,
662 qScale,
663 qOffset,
664 1, // Padding left.
665 1, // Padding top.
666 2, // Padding right.
667 2, // Padding bottom.
668 1, // strideX
669 1); // strideY
670}
671
telsoa014fcda012018-03-09 14:13:49 +0000672LayerTestResult<float, 4>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000673Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
674 armnn::IWorkloadFactory& workloadFactory,
675 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
676 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +0000677{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000678 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon<float>(
679 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000680}
681
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000682LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
683 armnn::IWorkloadFactory& workloadFactory,
684 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
685 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +0000686{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000687 return SimpleConvolution2dAsymmetricPaddingTestCommon<float>(
688 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000689}
690
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000691LayerTestResult<float, 4> DepthwiseConvolution2dTest(
692 armnn::IWorkloadFactory& workloadFactory,
693 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
694 bool biasEnabled,
695 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +0000696{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000697 return DepthwiseConvolution2dTestImpl<float, float>(
698 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000699}
700
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000701LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
702 armnn::IWorkloadFactory& workloadFactory,
703 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
704 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +0100705{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000706 return DepthwiseConvolution2dNhwcTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
Nikhil Rajcec6b652018-10-12 13:51:57 +0100707}
708
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000709LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
710 armnn::IWorkloadFactory& workloadFactory,
711 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
712 bool biasEnabled,
713 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +0000714{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000715 return DepthwiseConvolution2dDepthMul1TestImpl<float, float>(
716 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000717}
718
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000719LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
720 armnn::IWorkloadFactory& workloadFactory,
721 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
722 bool biasEnabled,
723 const armnn::DataLayoutIndexed& layout)
surmeh013537c2c2018-05-18 16:31:43 +0100724{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000725 return DepthwiseConvolution2dAsymmetricTestCommon<float>(
726 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +0100727}
728
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000729LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
730 armnn::IWorkloadFactory& workloadFactory,
731 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
732 bool biasEnabled,
733 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +0000734{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000735 return DepthwiseConvolution2dTestImpl<uint8_t, int32_t>(
736 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000737}
738
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000739LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
740 armnn::IWorkloadFactory& workloadFactory,
741 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
742 bool biasEnabled,
743 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +0000744{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000745 return DepthwiseConvolution2dDepthMul1TestImpl<uint8_t, int32_t>(
746 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000747}
748
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000749LayerTestResult<float, 4> Convolution1dTest(
750 armnn::IWorkloadFactory& workloadFactory,
751 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
752 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000753{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000754 return Convolution1dTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000755}
756
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000757LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
758 armnn::IWorkloadFactory& workloadFactory,
759 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
760 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000761{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000762 return Convolution1dTestImpl<uint8_t>(workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000763}
764
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000765LayerTestResult<float,4> CompareConvolution2dTest(
766 armnn::IWorkloadFactory& workloadFactory,
767 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
768 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +0000769{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000770 return CompareConvolution2dTestImpl<float>(workloadFactory, memoryManager, refWorkloadFactory);
telsoa014fcda012018-03-09 14:13:49 +0000771}
772
773template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000774LayerTestResult<T,4> CompareDepthwiseConvolution2dTest(
775 armnn::IWorkloadFactory& workloadFactory,
776 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
777 armnn::IWorkloadFactory& refWorkloadFactory,
778 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +0000779{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000780 return CompareDepthwiseConvolution2dTestImpl<T>(workloadFactory, memoryManager, refWorkloadFactory, layout);
telsoa014fcda012018-03-09 14:13:49 +0000781}
782
783template LayerTestResult<float, 4> CompareDepthwiseConvolution2dTest<float>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000784 armnn::IWorkloadFactory&,
785 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
786 armnn::IWorkloadFactory&,
787 const armnn::DataLayoutIndexed&);
telsoa014fcda012018-03-09 14:13:49 +0000788
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000789template LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dTest<uint8_t>(
790 armnn::IWorkloadFactory&,
791 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
792 armnn::IWorkloadFactory&,
793 const armnn::DataLayoutIndexed&);
794
795LayerTestResult<float,4> SimpleNormalizationAcrossTest(
796 armnn::IWorkloadFactory& workloadFactory,
797 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000798{
799 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
800 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000801 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000802}
803
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000804LayerTestResult<float,4> SimpleNormalizationWithinTest(
805 armnn::IWorkloadFactory& workloadFactory,
806 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000807{
808 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
809 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000810 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000811}
812
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000813LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
814 armnn::IWorkloadFactory& workloadFactory,
815 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +0100816{
817 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
818 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000819 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +0100820}
821
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000822LayerTestResult<float,2> SimpleSoftmaxTest(
823 armnn::IWorkloadFactory& workloadFactory,
824 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
825 float beta)
telsoa014fcda012018-03-09 14:13:49 +0000826{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000827 return SimpleSoftmaxTestImpl<float>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +0000828}
829
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000830LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
831 armnn::IWorkloadFactory& workloadFactory,
832 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
833 float beta)
telsoa014fcda012018-03-09 14:13:49 +0000834{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000835 return SimpleSoftmaxTestImpl<uint8_t>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +0000836}
837
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000838LayerTestResult<float,4> CompareNormalizationTest(
839 armnn::IWorkloadFactory& workloadFactory,
840 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
841 armnn::IWorkloadFactory& refWorkloadFactory,
842 armnn::NormalizationAlgorithmChannel normChannel,
843 armnn::NormalizationAlgorithmMethod normMethod)
telsoa014fcda012018-03-09 14:13:49 +0000844{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000845 return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000846}
847
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000848LayerTestResult<float,2> CompareSoftmaxTest(
849 armnn::IWorkloadFactory& workloadFactory,
850 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000851 armnn::IWorkloadFactory& refWorkloadFactory,
852 float beta)
853{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000854 return CompareSoftmaxTestImpl<float>(workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +0000855}
856
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000857LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
858 armnn::IWorkloadFactory& workloadFactory,
859 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000860 armnn::IWorkloadFactory& refWorkloadFactory,
861 float beta)
862{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000863 return CompareSoftmaxTestImpl<uint8_t>(workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +0000864}
865
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000866std::vector<LayerTestResult<float,3>> SplitterTest(
867 armnn::IWorkloadFactory& workloadFactory,
868 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000869{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000870 return SplitterTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +0000871}
872
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000873std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
874 armnn::IWorkloadFactory& workloadFactory,
875 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000876{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000877 return SplitterTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000878}
879
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000880LayerTestResult<float, 3> CopyViaSplitterTest(
881 armnn::IWorkloadFactory& workloadFactory,
882 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000883{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000884 return CopyViaSplitterTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000885}
886
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000887LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
888 armnn::IWorkloadFactory& workloadFactory,
889 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000890{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000891 return CopyViaSplitterTestImpl<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000892}
893
// LSTM test: CIFG enabled, peephole connections enabled, no projection layer.
// Feeds a 2x2 input batch through the impl and checks against the golden
// 2x4 reference output baked into this function.
LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Input: batch of 2, input size 2.
    armnn::TensorInfo inputDesc({ 2, 2 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            { 2., 3., 3., 4. }));

    // Expected output: batch of 2, output size 4 (golden reference values).
    armnn::TensorInfo outputDesc({ 2, 4 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
             -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
        workloadFactory, memoryManager, input, expectedOutput);
}
909
// LSTM test: CIFG disabled, peephole connections enabled, projection layer enabled.
// Feeds a 2x5 input batch through the impl and checks against the golden
// 2x16 reference output baked into this function.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Input: batch of 2, input size 5.
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
             0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    // Expected output: batch of 2, output size 16 (golden reference values).
    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
             -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
             -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
             0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
             -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
             0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
             0.02168f}));
    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(workloadFactory, memoryManager, input, expectedOutput);
}
930
// LSTM test: CIFG disabled, no peephole connections, no projection layer.
// Feeds a 2x2 input batch through the impl and checks against the golden
// 2x4 reference output baked into this function.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Input: batch of 2, input size 2.
    armnn::TensorInfo inputDesc({2, 2}, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {2., 3., 3., 4.}));


    // Expected output: batch of 2, output size 4 (golden reference values).
    armnn::TensorInfo outputDesc({2, 4}, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
              -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl(
        workloadFactory, memoryManager, input, expectedOutput);
}
948
// Exercises the Merger (concatenation) workload: two 3D inputs (2 channels and
// 1 channel respectively) are written into non-overlapping views of a single
// 3-channel output tensor; the result must equal their concatenation along the
// channel dimension.
LayerTestResult<float,3> MergerTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    // First input fills channels [0, 2) of the output.
    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    // Second input fills channel [2, 3) of the output.
    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float,3> ret(outputTensorInfo);

    // Expected output: input1's two channels followed by input2's single channel.
    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,

            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,
        })
    );

    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
        {
            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // When the backend supports sub-tensors, the input handles are views directly
    // into the output tensor at the window origins; otherwise they are standalone
    // tensors and the merger workload performs the copy.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
1070
// Exercises the Addition workload on two same-shaped 2x2x2x3 Float32 tensors
// and compares against a precomputed element-wise sum.
LayerTestResult<float,4> AdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int batchSize = 2;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    // Both inputs and the output share the same NCHW shape.
    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);


    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
        {
            0.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            1.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 1.0f, 2.0f,

            0.0f, 0.0f, 1.0f,
            0.2f, 1.0f, 2.0f,
        }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
        {
            1.0f, 2.0f, 1.0f,
            0.0f, 1.0f, 2.0f,

            1.0f, 2.0f, -2.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 0.0f, -3.0f,

            0.0f, 0.0f, 1.0f,
            0.7f, 1.0f, 5.0f,
        }));

    // Expected result: element-wise sum of input1 and input2.
    LayerTestResult<float,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 4.0f, 2.0f,
            0.2f, 2.0f, 4.0f,

            2.0f, 4.0f, -1.0f,
            0.4f, 2.0f, 4.0f,

            0.0f, 4.0f, 2.0f,
            8.4f, 1.0f, -1.0f,

            0.0f, 0.0f, 2.0f,
            0.9f, 2.0f, 7.0f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1161
// Exercises Addition with NumPy-style broadcasting: a {1,3,2,1} tensor plus a
// {1,1,2,3} tensor producing a {1,3,2,3} result. Quantization scale/offset are
// applied to all three tensors only for quantized element types T.
template <typename T>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    // Quantization parameters only apply to quantized types (e.g. uint8).
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    // Expected result: input1 broadcast across width, input2 broadcast across
    // channels, summed element-wise.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1239
// Exercises Addition where the second operand is a single element ({1,1,1,1})
// broadcast over a {1,3,2,3} tensor. Quantization scale/offset are applied to
// all three tensors only for quantized element types T.
template <typename T>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    // Quantization parameters only apply to quantized types (e.g. uint8).
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f, 1.0f, 2.0f,
            3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f,
            9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    // The single scalar value that is broadcast-added to every element of input1.
    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    // Expected result: every element of input1 shifted by 0.5.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
            6.5f, 7.5f, 8.5f,
            9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1312
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001313LayerTestResult<float, 4> AdditionBroadcastTest(
1314 armnn::IWorkloadFactory& workloadFactory,
1315 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001316{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001317 return AdditionBroadcastTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001318}
1319
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001320LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
1321 armnn::IWorkloadFactory& workloadFactory,
1322 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001323{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001324 return AdditionBroadcastTestImpl<uint8_t>(workloadFactory, memoryManager, 2.f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001325}
1326
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001327LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
1328 armnn::IWorkloadFactory& workloadFactory,
1329 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001330{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001331 return AdditionBroadcast1ElementTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001332}
1333
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001334LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
1335 armnn::IWorkloadFactory& workloadFactory,
1336 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001337{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001338 return AdditionBroadcast1ElementTestImpl<uint8_t>(workloadFactory, memoryManager, 0.1333333f, 128);
telsoa014fcda012018-03-09 14:13:49 +00001339}
1340
// Runs the same Addition workload on both workloadFactory and refWorkloadFactory
// with identical random inputs; the caller compares output vs outputExpected.
LayerTestResult<float,4> CompareAdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    // Deterministic pseudo-random inputs (fixed seeds) shared by both backends.
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The reference descriptor reuses the same tensor infos but points at the
    // reference backend's handles.
    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    // ret.output holds the backend-under-test result; ret.outputExpected holds
    // the reference backend result.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
1408
surmeh01bceff2f2018-03-29 16:29:27 +01001409namespace {
David Beck5cd01f32018-09-12 16:00:08 +01001410template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001411LayerTestResult<T, 4> DivisionTestHelper(
1412 armnn::IWorkloadFactory& workloadFactory,
1413 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1414 const unsigned int shape0[4],
1415 const std::vector<T>& values0,
1416 float scale0,
1417 int32_t offset0,
1418 const unsigned int shape1[4],
1419 const std::vector<T> & values1,
1420 float scale1,
1421 int32_t offset1,
1422 const unsigned int outShape[4],
1423 const std::vector<T> & outValues,
1424 float outScale,
1425 int32_t outOffset)
David Beck5cd01f32018-09-12 16:00:08 +01001426{
1427 auto dataType = (std::is_same<T, uint8_t>::value ?
1428 armnn::DataType::QuantisedAsymm8 :
1429 armnn::DataType::Float32);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001430
David Beck5cd01f32018-09-12 16:00:08 +01001431 armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
1432 armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
1433 armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001434
David Beck5cd01f32018-09-12 16:00:08 +01001435 inputTensorInfo0.SetQuantizationScale(scale0);
1436 inputTensorInfo0.SetQuantizationOffset(offset0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001437
David Beck5cd01f32018-09-12 16:00:08 +01001438 inputTensorInfo1.SetQuantizationScale(scale1);
1439 inputTensorInfo1.SetQuantizationOffset(offset1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001440
David Beck5cd01f32018-09-12 16:00:08 +01001441 outputTensorInfo.SetQuantizationScale(outScale);
1442 outputTensorInfo.SetQuantizationOffset(outOffset);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001443
David Beck5cd01f32018-09-12 16:00:08 +01001444 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
1445 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001446
David Beck5cd01f32018-09-12 16:00:08 +01001447 LayerTestResult<T, 4> result(outputTensorInfo);
1448 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001449
David Beck5cd01f32018-09-12 16:00:08 +01001450 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1451 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1452 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001453
David Beck5cd01f32018-09-12 16:00:08 +01001454 armnn::DivisionQueueDescriptor data;
1455 armnn::WorkloadInfo info;
1456 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1457 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1458 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001459
David Beck5cd01f32018-09-12 16:00:08 +01001460 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001461
David Beck5cd01f32018-09-12 16:00:08 +01001462 inputHandle0->Allocate();
1463 inputHandle1->Allocate();
1464 outputHandle->Allocate();
1465
1466 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1467 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1468
David Beck5cd01f32018-09-12 16:00:08 +01001469 workload->Execute();
1470
1471 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
1472
1473 return result;
1474}
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001475} // anonymous namespace
1476
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001477LayerTestResult<float,4> DivisionByZeroTest(
1478 armnn::IWorkloadFactory& workloadFactory,
1479 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001480{
1481 const unsigned int width = 2;
1482 const unsigned int height = 2;
1483 const unsigned int channelCount = 2;
1484 const unsigned int batchSize = 2;
1485
1486 unsigned int shape[] = { batchSize, channelCount, height, width };
1487
1488 std::vector<float> input0({
1489 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
1490 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
1491
1492 std::vector<float> input1({
1493 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
1494 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
1495
1496 std::vector<float> output({
1497 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
1498 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
1499
David Beck5cd01f32018-09-12 16:00:08 +01001500 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001501 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001502 shape, input0, 1.0f, 0,
1503 shape, input1, 1.0f, 0,
1504 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001505}
1506
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001507LayerTestResult<float,4> DivisionTest(
1508 armnn::IWorkloadFactory& workloadFactory,
1509 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001510{
1511 const unsigned int width = 2;
1512 const unsigned int height = 2;
1513 const unsigned int channelCount = 2;
1514 const unsigned int batchSize = 2;
1515
1516 unsigned int shape[] = { batchSize, channelCount, height, width };
1517
1518 std::vector<float> input0({
1519 2, 2, 2, 2, 3, 3, 3, 3,
1520 4, 4, 4, 4, 5, 5, 5, 5 });
1521
1522 std::vector<float> input1({
1523 1, 1, 1, 1, 2, 2, 2, 2,
1524 4, 4, 4, 4, 4, 4, 4, 4 });
1525
1526 std::vector<float> output({
1527 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
1528 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
1529
David Beck5cd01f32018-09-12 16:00:08 +01001530
1531 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001532 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001533 shape, input0, 1.0f, 0,
1534 shape, input1, 1.0f, 0,
1535 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001536}
1537
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001538LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
1539 armnn::IWorkloadFactory& workloadFactory,
1540 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001541{
1542 unsigned int shape0[] = { 1, 2, 2, 2 };
1543 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1544
1545 unsigned int shape1[] = { 1, 1, 1, 1 };
1546 std::vector<float> input1({ 2 });
1547
1548 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1549
David Beck5cd01f32018-09-12 16:00:08 +01001550
1551 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001552 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001553 shape0, input0, 1.0f, 0,
1554 shape1, input1, 1.0f, 0,
1555 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001556}
1557
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001558LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
1559 armnn::IWorkloadFactory& workloadFactory,
1560 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001561{
1562 unsigned int shape0[] = { 1, 3, 3, 2 };
1563 std::vector<float> input0({
1564 1, 4, 3, 8, 5, 12,
1565 7, 16, 9, 20, 11, 24,
1566 13, 28, 15, 32, 17, 36});
1567
1568 unsigned int shape1[] = { 1, 1, 1, 2 };
1569 std::vector<float> input1({ 1, 2 });
1570
1571 std::vector<float> output({
1572 1, 2, 3, 4, 5, 6,
1573 7, 8, 9, 10, 11, 12,
1574 13, 14, 15, 16, 17, 18});
1575
David Beck5cd01f32018-09-12 16:00:08 +01001576 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001577 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001578 shape0, input0, 1.0f, 0,
1579 shape1, input1, 1.0f, 0,
1580 shape0, output, 1.0f, 0);
1581}
1582
1583
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001584LayerTestResult<uint8_t,4> DivisionUint8Test(
1585 armnn::IWorkloadFactory& workloadFactory,
1586 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001587{
1588 const unsigned int width = 2;
1589 const unsigned int height = 2;
1590 const unsigned int channelCount = 2;
1591 const unsigned int batchSize = 2;
1592
1593 unsigned int shape[] = { batchSize, channelCount, height, width };
1594
1595 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
1596 4, 4, 4, 4, 5, 5, 5, 5 });
1597
1598 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
1599 4, 4, 4, 4, 4, 4, 4, 4 });
1600
1601 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
1602 4, 4, 4, 4, 5, 5, 5, 5});
1603
1604
1605 return DivisionTestHelper<uint8_t>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001606 memoryManager,
1607 shape, input0, 1.0f, 0,
1608 shape, input1, 1.0f, 0,
1609 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001610}
1611
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001612LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
1613 armnn::IWorkloadFactory& workloadFactory,
1614 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001615{
1616 unsigned int shape0[] = { 1, 2, 2, 2 };
1617 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1618
1619 unsigned int shape1[] = { 1, 1, 1, 1 };
1620 std::vector<uint8_t> input1({ 2 });
1621
1622 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1623
1624 return DivisionTestHelper<uint8_t>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001625 memoryManager,
1626 shape0, input0, 1.0f, 0,
1627 shape1, input1, 1.0f, 0,
1628 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001629}
1630
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001631LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
1632 armnn::IWorkloadFactory& workloadFactory,
1633 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001634{
1635 unsigned int shape0[] = { 1, 3, 3, 2 };
1636 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
1637 7, 16, 9, 20, 11, 24,
1638 13, 28, 15, 32, 17, 36});
1639
1640 unsigned int shape1[] = { 1, 1, 1, 2 };
1641 std::vector<uint8_t> input1({ 1, 2 });
1642
1643 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
1644 7, 8, 9, 10, 11, 12,
1645 13, 14, 15, 16, 17, 18});
1646
1647 return DivisionTestHelper<uint8_t>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001648 memoryManager,
1649 shape0, input0, 1.0f, 0,
1650 shape1, input1, 1.0f, 0,
1651 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001652}
1653
1654namespace {
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001655LayerTestResult<float,4> MultiplicationTestHelper(
1656 armnn::IWorkloadFactory& workloadFactory,
1657 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1658 const unsigned int shape0[4],
1659 const std::vector<float> & values0,
1660 const unsigned int shape1[4],
1661 const std::vector<float> & values1,
1662 const unsigned int outShape[4],
1663 const std::vector<float> & outValues)
telsoa014fcda012018-03-09 14:13:49 +00001664{
surmeh01bceff2f2018-03-29 16:29:27 +01001665 const size_t dimensionCount = 4;
1666 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
1667 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
1668 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
telsoa014fcda012018-03-09 14:13:49 +00001669
surmeh01bceff2f2018-03-29 16:29:27 +01001670 auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
1671 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00001672
1673 LayerTestResult<float,4> ret(outputTensorInfo);
1674
1675 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1676 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1677 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1678
1679 armnn::MultiplicationQueueDescriptor data;
1680 armnn::WorkloadInfo info;
1681 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1682 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1683 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1684
1685 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
1686
1687 inputHandle0->Allocate();
1688 inputHandle1->Allocate();
1689 outputHandle->Allocate();
1690
1691 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1692 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1693
1694 workload->Execute();
1695
1696 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1697
surmeh01bceff2f2018-03-29 16:29:27 +01001698 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00001699 return ret;
1700}
surmeh01bceff2f2018-03-29 16:29:27 +01001701} // anonymous namespace
1702
1703
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001704LayerTestResult<float,4> MultiplicationTest(
1705 armnn::IWorkloadFactory& workloadFactory,
1706 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01001707{
1708 const unsigned int width = 2;
1709 const unsigned int height = 2;
1710 const unsigned int channelCount = 2;
1711 const unsigned int batchSize = 2;
1712
1713 unsigned int shape[] = { batchSize, channelCount, height, width };
1714
1715 std::vector<float> input0({
1716 1, 1, 1, 1, 2, 2, 2, 2,
1717 3, 3, 3, 3, 4, 4, 4, 4 });
1718
1719 std::vector<float> input1({
1720 2, 2, 2, 2, 3, 3, 3, 3,
1721 4, 4, 4, 4, 5, 5, 5, 5 });
1722
1723 std::vector<float> output({
1724 2, 2, 2, 2, 6, 6, 6, 6,
1725 12, 12, 12, 12, 20, 20, 20, 20 });
1726
1727 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001728 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01001729 shape,
1730 input0,
1731 shape,
1732 input1,
1733 shape,
1734 output);
1735}
1736
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001737LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
1738 armnn::IWorkloadFactory& workloadFactory,
1739 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01001740{
1741 unsigned int shape0[] = { 1, 2, 2, 2 };
1742 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
1743
1744 unsigned int shape1[] = { 1, 1, 1, 1 };
1745 std::vector<float> input1({ 2 });
1746
1747 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
1748
1749 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001750 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01001751 shape0,
1752 input0,
1753 shape1,
1754 input1,
1755 shape0,
1756 output);
1757}
1758
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001759LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
1760 armnn::IWorkloadFactory& workloadFactory,
1761 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01001762{
1763 unsigned int shape0[] = { 1, 3, 3, 2 };
1764 std::vector<float> input0({
1765 1, 2, 3, 4, 5, 6,
1766 7, 8, 9, 10, 11, 12,
1767 13, 14, 15, 16, 17, 18});
1768
1769 unsigned int shape1[] = { 1, 1, 1, 2 };
1770 std::vector<float> input1({ 1, 2 });
1771
1772 std::vector<float> output({
1773 1, 4, 3, 8, 5, 12,
1774 7, 16, 9, 20, 11, 24,
1775 13, 28, 15, 32, 17, 36});
1776
1777 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001778 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01001779 shape0,
1780 input0,
1781 shape1,
1782 input1,
1783 shape0,
1784 output);
1785}
telsoa014fcda012018-03-09 14:13:49 +00001786
LayerTestResult<float,4> CompareMultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    // Runs an identical Multiplication workload on the factory under test and
    // on a reference factory, feeding both the same random inputs. The caller
    // compares comparisonResult.output (tested backend) against
    // comparisonResult.outputExpected (reference backend).
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    // Fixed constants keep the "random" inputs identical across runs
    // (presumably RNG seeds - confirm against MakeRandomTensor).
    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The reference workload copies the descriptor/info and is re-pointed at
    // the reference factory's tensor handles.
    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    // Allocate all six handles, then copy the same inputs into both backends.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
1855
LayerTestResult<float,4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    // Runs the same BatchNormalization workload on the backend under test and
    // on a reference backend with identical random input and parameters; the
    // caller compares ret.output (tested) against ret.outputExpected (reference).
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    // Per-channel parameter tensors (mean/variance/beta/gamma) are 1-D.
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    // NOTE(review): the extra 0.0f argument presumably keeps variance
    // non-negative - confirm against MakeRandomTensor's signature.
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    // The parameter tensors live on scoped CPU handles; the descriptor stores
    // raw pointers to them, so they must outlive both workloads (they do - all
    // are locals of this function).
    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    // The reference workload copies the descriptor (including the parameter
    // pointers) and is re-pointed at the reference factory's handles.
    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive the same input data.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
1936
// Runs a Permute workload over 'inputData' using 'mappings'.
// On return 'outputData' holds the permuted elements and 'inputTensorInfo'
// is overwritten with the permuted tensor info, so callers can chain
// further operations on the result.
template<typename T>
void PermuteTensorData(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::PermutationVector& mappings,
    armnn::TensorInfo & inputTensorInfo,
    const T * inputData,
    std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->Execute();

    // Size the output buffer to the permuted element count before copying out.
    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    // Report the permuted info back to the caller through the in/out parameter.
    inputTensorInfo = outputTensorInfo;
}
1979
1980armnn::OriginsDescriptor CreateMergerDescriptorForConcatenation(
1981 const std::vector<armnn::TensorInfo> & inputTensorInfos,
1982 unsigned int concatDim)
1983{
telsoa014fcda012018-03-09 14:13:49 +00001984 std::vector<armnn::TensorShape> shapes;
1985 shapes.reserve(inputTensorInfos.size());
1986 for (const armnn::TensorInfo& it: inputTensorInfos)
1987 {
1988 shapes.push_back(it.GetShape());
1989 }
surmeh013537c2c2018-05-18 16:31:43 +01001990
1991 return armnn::CreateMergerDescriptorForConcatenation(shapes.begin(),
1992 shapes.end(),
1993 concatDim);
1994}
1995
1996//
1997// Concatenation is only supported for N and C dimensions for NCHW. In case of
telsoa01c577f2c2018-08-31 09:22:23 +01001998// <4 dimensions we need to make sure that the concat dimensions are at least
surmeh013537c2c2018-05-18 16:31:43 +01001999// the 3rd slowest iterating one.
2000//
2001
2002bool NeedPermuteForConcat(
2003 const std::vector<armnn::TensorInfo> & inputTensorInfos,
2004 unsigned int concatDim)
2005{
2006 // See note above. Additionally we expect the input shapes to have the
2007 // same number of dimensions.
2008 unsigned int nDimensions = 0;
2009
telsoa01c577f2c2018-08-31 09:22:23 +01002010 // Determine the number of dimensions as well as sanity check them
2011 // agains test implementation issues.
surmeh013537c2c2018-05-18 16:31:43 +01002012 for (auto && tensorInfo : inputTensorInfos)
2013 {
2014 if (!nDimensions)
2015 {
2016 nDimensions = tensorInfo.GetShape().GetNumDimensions();
2017 }
2018 else
2019 {
2020 BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
2021 "Input shapes must have the same number of dimensions");
2022 }
2023 }
2024
2025 return (nDimensions-concatDim) < 3;
2026}
2027
2028armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
2029{
2030 unsigned int numDims = inputShape.GetNumDimensions();
2031 if (numDims >= 3)
2032 {
2033 // Nothing to do if the inputShape has at least 3 dimensions.
2034 return inputShape;
2035 }
2036
2037 std::vector<unsigned int> newDims(size_t(3), 1u);
2038 unsigned int expandedBy = 3 - numDims;
2039 for (unsigned int i=0; i<numDims; ++i)
2040 {
2041 newDims[expandedBy+i] = inputShape[i];
2042 }
2043 return armnn::TensorShape(3u, &newDims[0]);
2044}
2045
2046void Generate3dPermuteVectorForConcat(
2047 unsigned int numDimensions,
2048 unsigned int & concatDim,
2049 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
2050{
2051 BOOST_ASSERT_MSG(numDimensions <= 3,
2052 "Only dimensions 1,2 and 3 are supported by this helper");
2053
2054 unsigned int expandedBy = 3 - numDimensions;
2055 unsigned int expandedConcatAxis = concatDim + expandedBy;
2056
2057 if (expandedConcatAxis == 2)
2058 {
2059 concatDim = 0;
2060 armnn::PermutationVector forwardPermutation({1, 2, 0});
2061 armnn::PermutationVector reversePermutation({2, 0, 1});
2062 permutations = std::make_pair(forwardPermutation, reversePermutation);
2063 }
2064 else if (expandedConcatAxis == 1)
2065 {
2066 concatDim = 0;
2067 armnn::PermutationVector forwardPermutation({2, 0, 1});
2068 armnn::PermutationVector reversePermutation({1, 2, 0});
2069 permutations = std::make_pair(forwardPermutation, reversePermutation);
2070 }
2071 else
2072 {
2073 BOOST_ASSERT(expandedConcatAxis == 0);
2074 concatDim = 0;
2075 }
2076}
2077
//
// Permute the input tensors so we can do a supported concatenation.
// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
// at the front. Finally this function tells what the output shape
// of the permuted concatenated tensor is going to be.
//
// In/out parameters: inputTensorInfos and inputData are rewritten to describe
// the permuted copies (backed by inputDataStorage); permuteVector receives the
// reverse permutation needed to undo the transform; concatDim is rewritten to
// the axis to concatenate on after permuting; outputTensorInfo's shape is
// replaced by the padded-and-permuted output shape.
template <typename T>
void PermuteInputsForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::vector<armnn::TensorInfo> & inputTensorInfos,
    std::vector<T *> & inputData,
    std::vector<std::vector<T>> & inputDataStorage,
    armnn::PermutationVector & permuteVector,
    unsigned int & concatDim,
    armnn::TensorInfo & outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    // One storage vector per input: holds the permuted copy of each input.
    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            // The first input decides the rank; derive the forward/reverse
            // permutations (and the adjusted concatDim) from it.
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);
            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        // Pad the shape to 3-D, permute the data into inputDataStorage and
        // point inputData at the permuted copy.
        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    // The output shape undergoes the same pad-to-3D + forward permutation.
    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
2145
2146
//
// This is the pair of PermuteInputsForConcat(...) which permutes back
// the output of the concatenation so we can check it against an expected
// output.
//
// Reads the concatenated result out of 'inputDataHandle', applies
// 'permuteVector' (the reverse permutation produced by
// PermuteInputsForConcat) and copies the restored layout into the caller's
// 'data' buffer, which must hold tensorInfo.GetNumElements() elements.
template <typename T>
void PermuteOutputForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo & tensorInfo,
    const armnn::PermutationVector & permuteVector,
    std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
    T * data)
{
    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
    if (data == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo resultTensorInfo = tensorInfo;
    std::vector<T> inputData(tensorInfo.GetNumElements());
    std::vector<T> outputData;

    // Pull the concatenated result out of the handle...
    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());

    // ...permute it back to the original layout...
    PermuteTensorData<T>(workloadFactory,
                         memoryManager,
                         permuteVector,
                         resultTensorInfo,
                         &inputData[0],
                         outputData);

    // ...and hand the restored data to the caller.
    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
}
2185
// Concatenates the given inputs along concatDim into the caller-supplied
// output buffer by building and executing a Merger workload on the given
// backend. When the backend cannot concatenate along the requested axis
// directly (see NeedPermuteForConcat), the inputs and the output shape are
// permuted first and the result is permuted back before being copied out.
template <typename T>
void Concatenate(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
    std::initializer_list<T *> inputsOrig,
    const armnn::TensorInfo& outputTensorInfoOrig,
    T * output,
    unsigned int concatDim)
{
    BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
    if (output == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::MergerQueueDescriptor queueDescriptor;

    // Saves a copy of the parameters which we might need to change.
    std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
    std::vector<T *> inputs            = inputsOrig;
    armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;

    armnn::PermutationVector permuteVector{0, 1, 2};

    // Holds and automatically releases memory for the reshaped input data.
    std::vector<std::vector<T>> tmpInputDataStorage;

    const size_t inputCount = inputTensorInfos.size();

    bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);

    if (needPermuteForConcat)
    {
        //
        // We need to permute the inputs, because concatenation along
        // the requested axis is not supported.
        //
        PermuteInputsForConcat<T>(workloadFactory,
                                  memoryManager,
                                  inputTensorInfos,
                                  inputs,
                                  tmpInputDataStorage,
                                  permuteVector,
                                  concatDim,
                                  outputTensorInfo);
    }

    // Describes where each input view begins within the output tensor.
    armnn::OriginsDescriptor viewsDescriptor = CreateMergerDescriptorForConcatenation(inputTensorInfos, concatDim);

    queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
    for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
    {
        queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
            viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
    }

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
    inputHandles.reserve(inputCount);

    // If the backend supports sub-tensors, each input handle is a view into
    // the output tensor at its view origin; otherwise every input gets a
    // standalone tensor handle.
    const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
    for (unsigned int i = 0; i < inputCount; ++i)
    {
        const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];

        std::unique_ptr<armnn::ITensorHandle> inputHandle = subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo.GetShape(),
                queueDescriptor.m_ViewOrigins[i].m_Origin.data())
            : workloadFactory.CreateTensorHandle(inputTensorInfo);

        inputHandles.emplace_back(std::move(inputHandle));
    }

    armnn::WorkloadInfo workloadInfo;

    for (unsigned int i = 0; i < inputCount; ++i)
    {
        AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
    }

    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(queueDescriptor, workloadInfo);

    // Allocation must happen before any data is copied into the handles.
    for (auto& inputHandle : inputHandles)
    {
        inputHandle->Allocate();
    }

    outputHandle->Allocate();

    unsigned int nextInputId = 0;
    for (auto& inputHandle : inputHandles)
    {
        CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
        ++nextInputId;
    }

    workload->Execute();

    if (needPermuteForConcat)
    {
        // Undo the earlier permutation while copying into the output buffer.
        PermuteOutputForConcat<T>(workloadFactory,
                                  memoryManager,
                                  outputTensorInfo,
                                  permuteVector,
                                  std::move(outputHandle),
                                  output);
    }
    else
    {
        CopyDataFromITensorHandle(output, outputHandle.get());
    }
}
2305
2306template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002307LayerTestResult<T, 1> Concatenation1dTestImpl(
2308 armnn::IWorkloadFactory& workloadFactory,
2309 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2310 float qScale,
2311 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00002312{
2313 armnn::TensorInfo inputTensorInfo({ 3 }, armnn::GetDataType<T>());
2314
2315 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
2316 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
2317 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
2318
2319 armnn::TensorInfo outputTensorInfo({ 9 }, armnn::GetDataType<T>());
2320
2321 LayerTestResult<T, 1> result(outputTensorInfo);
2322
2323 std::vector<T> output;
2324 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002325 Concatenate<T>(workloadFactory, memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00002326 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2327 { input0.data(), input1.data(), input2.data() },
2328 outputTensorInfo,
2329 output.data(),
2330 0);
2331
2332 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
2333 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2334 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
2335 }));
2336
2337 return result;
2338}
2339
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002340LayerTestResult<float, 1> Concatenation1dTest(
2341 armnn::IWorkloadFactory& workloadFactory,
2342 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002343{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002344 return Concatenation1dTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002345}
2346
// Shared driver for the 2D concatenation tests: concatenates three fixed
// 2x3 inputs along 'dimension' into 'outputTensorInfo' and returns the
// actual output. The caller fills in result.outputExpected.
template <typename T>
LayerTestResult<T, 2> Concatenation2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    const float qScale,
    const int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,
    }));

    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        7.0f, 8.0f, 9.0f,

        // Batch 1
        16.0f, 17.0f, 18.0f,
    }));

    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    return result;
}
2396
// Concatenates the three standard 2x3 inputs along dimension 0 (batch),
// producing a 6x3 output, and records the expected row ordering.
template <typename T>
LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());

    LayerTestResult<T, 2> result =
        Concatenation2dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
2430
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002431LayerTestResult<float, 2> Concatenation2dDim0Test(
2432 armnn::IWorkloadFactory& workloadFactory,
2433 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002434{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002435 return Concatenation2dDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002436}
2437
2438template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002439LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
2440 armnn::IWorkloadFactory& workloadFactory,
2441 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2442 float qScale,
2443 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00002444{
2445 armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
2446
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002447 LayerTestResult<T, 2> result =
2448 Concatenation2dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00002449 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2450 // Batch 0
2451 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
2452
2453 // Batch 1
2454 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
2455 }));
2456
2457 return result;
2458}
2459
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002460LayerTestResult<float, 2> Concatenation2dDim1Test(
2461 armnn::IWorkloadFactory& workloadFactory,
2462 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002463{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002464 return Concatenation2dDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002465}
2466
// Concatenates three 2D inputs with DIFFERENT batch counts (2x3, 3x3, 1x3)
// along dimension 0 into a 6x3 output, verifying that mismatched sizes along
// the concatenation axis are handled.
template <typename T>
LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 3, 3 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,

        // Batch 2
        7.0f, 8.0f, 9.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 1, 3 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        16.0f, 17.0f, 18.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
2536
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002537LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
2538 armnn::IWorkloadFactory& workloadFactory,
2539 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002540{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002541 return Concatenation2dDim0DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002542}
2543
// Concatenates three 2D inputs with DIFFERENT widths (2x3, 2x5, 2x1) along
// dimension 1 into a 2x9 output, verifying that mismatched sizes along the
// concatenation axis are handled.
template <typename T>
LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 5 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f, 7.0f, 8.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        9.0f,

        // Batch 1
        18.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
    }));

    return result;
}
2601
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002602LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
2603 armnn::IWorkloadFactory& workloadFactory,
2604 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002605{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002606 return Concatenation2dDim1DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002607}
2608
// Shared driver for the 3D concatenation tests: concatenates three fixed
// 2x3x2 inputs along 'dimension' into 'outputTensorInfo' and returns the
// actual output. The caller fills in result.outputExpected.
template <typename T>
LayerTestResult<T, 3> Concatenation3dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 1, Channel 0
        25.0f, 26.0f,

        // Batch 1, Channel 1
        27.0f, 28.0f,

        // Batch 1, Channel 2
        29.0f, 30.0f
    }));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f
    }));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}
2694
// Concatenates the three standard 2x3x2 inputs along dimension 0 (batch),
// producing a 6x3x2 output, and records the expected batch ordering.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result =
        Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));
    return result;
}
2763
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002764LayerTestResult<float, 3> Concatenation3dDim0Test(
2765 armnn::IWorkloadFactory& workloadFactory,
2766 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002767{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002768 return Concatenation3dDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002769}
2770
// Concatenates the three standard 2x3x2 inputs along dimension 1 (channels),
// producing a 2x9x2 output, and records the expected channel ordering.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result =
        Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        13.0f, 14.0f,

        // Batch 0, Channel 7
        15.0f, 16.0f,

        // Batch 0, Channel 8
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 4
        27.0f, 28.0f,

        // Batch 1, Channel 5
        29.0f, 30.0f,

        // Batch 1, Channel 6
        31.0f, 32.0f,

        // Batch 1, Channel 7
        33.0f, 34.0f,

        // Batch 1, Channel 8
        35.0f, 36.0f
    }));

    return result;
}
2840
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002841LayerTestResult<float, 3> Concatenation3dDim1Test(
2842 armnn::IWorkloadFactory& workloadFactory,
2843 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002844{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002845 return Concatenation3dDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002846}
2847
// Concatenates the three standard 2x3x2 inputs along dimension 2 (the
// innermost axis), producing a 2x3x6 output where the three inputs'
// element pairs are interleaved within each channel row.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result =
        Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 2, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
    }));

    return result;
}
2881
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002882LayerTestResult<float, 3> Concatenation3dDim2Test(
2883 armnn::IWorkloadFactory& workloadFactory,
2884 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002885{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002886 return Concatenation3dDim2TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002887}
2888
// Concatenates three 3D inputs with DIFFERENT batch counts (2x3x2, 1x3x2,
// 3x3x2) along dimension 0 into a 6x3x2 output, verifying that mismatched
// sizes along the concatenation axis are handled.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        25.0f, 26.0f,

        // Batch 0, Channel 1
        27.0f, 28.0f,

        // Batch 0, Channel 2
        29.0f, 30.0f,

        // Batch 1, Channel 0
        13.0f, 14.0f,

        // Batch 1, Channel 1
        15.0f, 16.0f,

        // Batch 1, Channel 2
        17.0f, 18.0f,

        // Batch 2, Channel 0
        31.0f, 32.0f,

        // Batch 2, Channel 1
        33.0f, 34.0f,

        // Batch 2, Channel 2
        35.0f, 36.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}
3030
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003031LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
3032 armnn::IWorkloadFactory& workloadFactory,
3033 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003034{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003035 return Concatenation3dDim0DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003036}
3037
// Concatenates three 3D inputs with DIFFERENT channel counts (2x3x2, 2x4x2,
// 2x1x2) along dimension 1 into a 2x8x2 output, verifying that mismatched
// sizes along the concatenation axis are handled.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 0, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 0
        27.0f, 28.0f,

        // Batch 1, Channel 1
        29.0f, 30.0f,

        // Batch 1, Channel 2
        13.0f, 14.0f,

        // Batch 1, Channel 3
        15.0f, 16.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        25.0f, 26.0f,

        // Batch 0, Channel 7
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        27.0f, 28.0f,

        // Batch 1, Channel 4
        29.0f, 30.0f,

        // Batch 1, Channel 5
        13.0f, 14.0f,

        // Batch 1, Channel 6
        15.0f, 16.0f,

        // Batch 1, Channel 7
        31.0f, 32.0f,
    }));

    return result;
}
3167
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003168LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
3169 armnn::IWorkloadFactory& workloadFactory,
3170 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003171{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003172 return Concatenation3dDim1DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003173}
3174
// Concatenates three 3-D tensors along dimension 2 (the innermost,
// width-like axis). The inputs deliberately differ along the
// concatenation axis (2, 1 and 3 elements respectively); all other
// dimensions match.
//
// workloadFactory - backend factory used by Concatenate() to create
//                   tensor handles and the merger workload.
// memoryManager   - backend memory manager forwarded to Concatenate().
// qScale/qOffset  - quantization parameters applied to every value
//                   (0.0f / 0 for plain Float32).
//
// Returns the actual and expected { 2, 3, 6 } outputs for comparison.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    // First input: 2 batches x 3 channels x 2 elements.
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    // Second input: 2 batches x 3 channels x 1 element.
    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f,

        // Batch 0, Channel 1
        9.0f,

        // Batch 0, Channel 2
        11.0f,

        // Batch 1, Channel 0
        25.0f,

        // Batch 1, Channel 1
        27.0f,

        // Batch 1, Channel 2
        29.0f
    }));

    // Third input: 2 batches x 3 channels x 3 elements.
    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f, 55.0f,
    }));

    // Output innermost dimension is the sum of the inputs' (2 + 1 + 3 = 6).
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Concatenate along dimension 2 (the innermost axis).
    Concatenate<T>(workloadFactory, memoryManager,
        { input0TensorInfo, input1TensorInfo, input2TensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        2);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    // Expected layout: per (batch, channel) row, input0's values first,
    // then input1's, then input2's.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
    }));

    return result;
}
3280
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003281LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
3282 armnn::IWorkloadFactory& workloadFactory,
3283 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003284{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003285 return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003286}
3287
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003288LayerTestResult<float, 4> ResizeBilinearNopTest(
3289 armnn::IWorkloadFactory& workloadFactory,
3290 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3291 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00003292{
James Conroy6b965822018-11-01 11:33:09 +00003293 const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
3294 const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00003295
James Conroy6b965822018-11-01 11:33:09 +00003296 std::vector<float> inputData({
3297 1.0f, 2.0f, 3.0f, 4.0f,
3298 2.0f, 3.0f, 4.0f, 5.0f,
3299 3.0f, 4.0f, 5.0f, 6.0f,
3300 4.0f, 5.0f, 6.0f, 7.0f,
3301
telsoa014fcda012018-03-09 14:13:49 +00003302 1.0f, 2.0f, 3.0f, 4.0f,
3303 2.0f, 3.0f, 4.0f, 5.0f,
3304 3.0f, 4.0f, 5.0f, 6.0f,
3305 4.0f, 5.0f, 6.0f, 7.0f
James Conroy6b965822018-11-01 11:33:09 +00003306 });
3307
3308 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
3309 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
3310 {
3311 std::vector<float> tmp(inputData.size());
3312 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
3313 inputData = tmp;
3314 }
3315
3316 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00003317
3318 LayerTestResult<float, 4> result(outputTensorInfo);
3319 result.outputExpected = input;
3320
3321 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3322 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3323
3324 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003325 descriptor.m_Parameters.m_DataLayout = dataLayout;
3326 armnn::WorkloadInfo info;
3327 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3328 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3329
3330 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3331
3332 inputHandle->Allocate();
3333 outputHandle->Allocate();
3334 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3335
James Conroy074f3712018-10-03 09:32:03 +01003336 workload->Execute();
3337
3338 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3339 return result;
3340}
3341
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003342LayerTestResult<float, 4> SimpleResizeBilinearTest(
3343 armnn::IWorkloadFactory& workloadFactory,
3344 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3345 const armnn::DataLayoutIndexed& dataLayout)
James Conroy074f3712018-10-03 09:32:03 +01003346{
James Conroy6b965822018-11-01 11:33:09 +00003347 const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
3348 const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 1, 1, dataLayout);
James Conroy074f3712018-10-03 09:32:03 +01003349
James Conroy6b965822018-11-01 11:33:09 +00003350 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01003351 1.0f, 255.0f,
James Conroy6b965822018-11-01 11:33:09 +00003352 200.0f, 250.0f,
3353
3354 250.0f, 200.0f,
3355 250.0f, 1.0f
3356 });
James Conroy074f3712018-10-03 09:32:03 +01003357
3358 // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
3359 // then figures out the interpolants and weights. Note this is different to projecting the centre of the
James Conroy6b965822018-11-01 11:33:09 +00003360 // output texel. Thus, for a input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
3361 // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
3362 // which we would expect if projecting the centre).
3363
3364 std::vector<float> outputData({
3365 1.0f,
3366
3367 250.0f
3368 });
3369
3370 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
3371 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
3372 {
3373 std::vector<float> tmp(inputData.size());
3374 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
3375 inputData = tmp;
3376
3377 std::vector<float> tmp1(outputData.size());
3378 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
3379 outputData = tmp1;
3380 }
3381
3382 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
3383
James Conroy074f3712018-10-03 09:32:03 +01003384 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00003385 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
James Conroy074f3712018-10-03 09:32:03 +01003386
3387 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3388 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3389
3390 armnn::ResizeBilinearQueueDescriptor descriptor;
3391 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003392 armnn::WorkloadInfo info;
3393 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3394 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3395
3396 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3397
3398 inputHandle->Allocate();
3399 outputHandle->Allocate();
3400 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3401
3402 workload->Execute();
3403
3404 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3405 return result;
3406}
3407
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003408LayerTestResult<float, 4> ResizeBilinearSqMinTest(
3409 armnn::IWorkloadFactory& workloadFactory,
3410 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3411 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00003412{
James Conroy6b965822018-11-01 11:33:09 +00003413 const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
3414 const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00003415
James Conroy6b965822018-11-01 11:33:09 +00003416 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01003417 1.0f, 2.0f, 3.0f, 4.0f,
3418 2.0f, 3.0f, 4.0f, 5.0f,
3419 3.0f, 4.0f, 5.0f, 6.0f,
James Conroy6b965822018-11-01 11:33:09 +00003420 4.0f, 5.0f, 6.0f, 7.0f,
3421
3422 7.0f, 6.0f, 5.0f, 4.0f,
3423 6.0f, 5.0f, 4.0f, 3.0f,
3424 5.0f, 4.0f, 3.0f, 2.0f,
3425 4.0f, 3.0f, 2.0f, 1.0f
3426 });
3427
3428 std::vector<float> outputData({
3429 1.0f, 3.0f,
3430 3.0f, 5.0f,
3431
3432 7.0f, 5.0f,
3433 5.0f, 3.0f
3434 });
3435
3436 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
3437 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
3438 {
3439 std::vector<float> tmp(inputData.size());
3440 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
3441 inputData = tmp;
3442
3443 std::vector<float> tmp1(outputData.size());
3444 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
3445 outputData = tmp1;
3446 }
3447
3448 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00003449
telsoa014fcda012018-03-09 14:13:49 +00003450 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00003451 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00003452
3453 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3454 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3455
3456 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003457 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003458 armnn::WorkloadInfo info;
3459 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3460 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3461
3462 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3463
3464 inputHandle->Allocate();
3465 outputHandle->Allocate();
3466 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3467
3468 workload->Execute();
3469
3470 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3471 return result;
3472}
3473
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003474LayerTestResult<float, 4> ResizeBilinearMinTest(
3475 armnn::IWorkloadFactory& workloadFactory,
3476 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3477 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00003478{
James Conroy6b965822018-11-01 11:33:09 +00003479 const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
3480 const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 2, 3, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00003481
James Conroy6b965822018-11-01 11:33:09 +00003482 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01003483 1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
3484 13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
James Conroy6b965822018-11-01 11:33:09 +00003485 144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
3486
3487 987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
3488 89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
3489 8.0f, 5.0f, 3.0f, 2.0f, 1.0f
3490 });
3491
3492 std::vector<float> outputData({
3493 1.0f, 2.6666f, 6.00f,
3494 78.5f, 179.3333f, 401.00f,
3495
3496 987.0f, 454.6670f, 203.33f,
3497 48.5f, 22.3333f, 10.00f
3498 });
3499
3500 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
3501 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
3502 {
3503 std::vector<float> tmp(inputData.size());
3504 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
3505 inputData = tmp;
3506
3507 std::vector<float> tmp1(outputData.size());
3508 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
3509 outputData = tmp1;
3510 }
3511
3512 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00003513
3514 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00003515 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00003516
3517 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3518 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3519
3520 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003521 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003522 armnn::WorkloadInfo info;
3523 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3524 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3525
3526 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3527
3528 inputHandle->Allocate();
3529 outputHandle->Allocate();
3530 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3531
3532 workload->Execute();
3533
3534 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3535 return result;
3536}
3537
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003538LayerTestResult<float, 4> ResizeBilinearMagTest(
3539 armnn::IWorkloadFactory& workloadFactory,
3540 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3541 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00003542{
James Conroy6b965822018-11-01 11:33:09 +00003543 const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 3, 2, dataLayout);
3544 const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00003545
James Conroy6b965822018-11-01 11:33:09 +00003546 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01003547 1.0f, 2.0f,
3548 13.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00003549 144.0f, 233.0f,
telsoa014fcda012018-03-09 14:13:49 +00003550
James Conroy6b965822018-11-01 11:33:09 +00003551 233.0f, 144.0f,
3552 21.0f, 13.0f,
3553 2.0f, 1.0f
3554 });
3555
3556 std::vector<float> outputData({
James Conroy074f3712018-10-03 09:32:03 +01003557 1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
3558 13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00003559 144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
3560
3561 233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
3562 21.0f, 17.8f, 14.6f, 13.0f, 13.0f,
3563 2.0f, 1.6f, 1.2f, 1.0f, 1.0f
3564 });
3565
3566 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
3567 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
3568 {
3569 std::vector<float> tmp(inputData.size());
3570 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
3571 inputData = tmp;
3572
3573 std::vector<float> tmp1(outputData.size());
3574 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
3575 outputData = tmp1;
3576 }
3577
3578 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
3579
3580 LayerTestResult<float, 4> result(outputTensorInfo);
3581 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00003582
3583 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3584 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3585
3586 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003587 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003588 armnn::WorkloadInfo info;
3589 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3590 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3591
3592 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3593
3594 inputHandle->Allocate();
3595 outputHandle->Allocate();
3596 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3597
3598 workload->Execute();
3599
3600 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3601 return result;
3602}
3603
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003604LayerTestResult<float, 2> FakeQuantizationTest(
3605 armnn::IWorkloadFactory& workloadFactory,
3606 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003607{
3608 constexpr unsigned int width = 2;
3609 constexpr unsigned int height = 3;
3610
3611 const armnn::TensorInfo tensorInfo({height, width },
3612 armnn::DataType::Float32);
3613 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
3614 -10.0f, -5.0f,
3615 0.0f, 5.0f,
3616 10.0f, 10.0f
3617 }));
3618
3619 LayerTestResult<float, 2> ret(tensorInfo);
3620
3621 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
3622
3623 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
3624
3625 armnn::FakeQuantizationQueueDescriptor data;
3626 armnn::WorkloadInfo info;
3627
3628 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
3629 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
3630 float min = -10.f;
3631 float max = 10.f;
3632
3633 data.m_Parameters.m_Min = min;
3634 data.m_Parameters.m_Max = max;
3635
3636 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
3637 armnn::FakeQuantizationQueueDescriptor refData = data;
3638 armnn::WorkloadInfo refInfo = info;
3639 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
3640
3641 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
3642
3643 inputHandle->Allocate();
3644 outputHandle->Allocate();
3645
3646 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
3647
3648 workload->Execute();
3649
3650 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
3651
3652 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
3653 0.0f, 63.0f,
3654 128.0f, 191.0f,
3655 255.0f, 255.0f
3656 }));
3657 return ret;
3658}
3659
namespace
{

// Runs an L2Normalization workload over the given input and returns the
// actual plus expected outputs. Input and output share one shape
// (inputOutputTensorShape); both inputValues and expectedOutputValues are
// supplied in NCHW order and are permuted here when the requested layout
// is NHWC.
LayerTestResult<float, 4> L2NormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    const std::vector<float>& inputValues,
    const std::vector<float>& expectedOutputValues,
    const armnn::DataLayoutIndexed& layout)
{
    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);

    // at this point if we require it permute the input data
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    std::vector<float> inputData = inputValues;
    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
        inputData = tmp;
    }

    auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputData));

    LayerTestResult<float, 4> result(outputTensorInfo);
    // Permute the expected output the same way as the input. Using the
    // input shape/info below is safe because input and output shapes are
    // identical for L2 normalization.
    std::vector<float> expectedOutputData = expectedOutputValues;
    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(expectedOutputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data());
        expectedOutputData = tmp;
    }
    result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(expectedOutputData));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = layout.GetDataLayout();
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    // ExecuteWorkload acquires/releases the backend memory manager around the run.
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

// Returns 1 / sqrt(sum of squares) of the given values, i.e. the scale
// factor L2 normalization applies to each element of the vector.
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
        [](float acc, float element) { return acc + element * element; });
    return 1.0f / sqrtf(reduction);
}

} // anonymous namespace
3728
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003729template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003730LayerTestResult<T, 2> Pad2dTestCommon(
3731 armnn::IWorkloadFactory& workloadFactory,
3732 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3733 float qScale,
3734 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003735{
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003736 const armnn::TensorShape inputShape{ 3, 3 };
3737 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003738
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003739 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
3740 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003741
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003742 std::vector<T> inputValues(
3743 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003744 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003745 // Height (3) x Width (3)
3746 4, 8, 6,
3747 7, 4, 4,
3748 3, 2, 4
3749 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003750
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003751 std::vector<T> expectedOutputValues(
3752 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003753 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003754 0, 0, 0, 0, 0, 0, 0,
3755 0, 0, 0, 0, 0, 0, 0,
3756 0, 0, 4, 8, 6, 0, 0,
3757 0, 0, 7, 4, 4, 0, 0,
3758 0, 0, 3, 2, 4, 0, 0,
3759 0, 0, 0, 0, 0, 0, 0,
3760 0, 0, 0, 0, 0, 0, 0
3761 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003762
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003763 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003764
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003765 LayerTestResult<T, 2> result(outputTensorInfo);
3766 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003767
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003768 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3769 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003770
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003771 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003772
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003773 std::vector<std::pair<unsigned int, unsigned int>> PadList;
3774 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
3775 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003776
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003777 descriptor.m_Parameters.m_PadList = PadList;
3778 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003779
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003780 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3781 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003782
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003783 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003784
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003785 inputHandle->Allocate();
3786 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003787
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003788 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003789
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003790 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003791
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003792 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003793
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003794 return result;
3795}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003796
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003797template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003798LayerTestResult<T, 3> Pad3dTestCommon(
3799 armnn::IWorkloadFactory& workloadFactory,
3800 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3801 float qScale,
3802 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003803{
3804 const armnn::TensorShape inputShape{ 2, 2, 2 };
3805 const armnn::TensorShape outputShape{ 3, 5, 6 };
3806
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003807 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
3808 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003809
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003810 std::vector<T> inputValues(
3811 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003812 {
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003813 // Channel 0, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003814 0, 4,
3815 2, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003816
3817 // Channel 1, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003818 6, 1,
3819 5, 2
3820 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003821
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003822 std::vector<T> expectedOutputValues(
3823 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003824 {
3825
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003826 0, 0, 0, 0, 0, 0,
3827 0, 0, 0, 0, 0, 0,
3828 0, 0, 0, 4, 0, 0,
3829 0, 0, 2, 5, 0, 0,
3830 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003831
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003832 0, 0, 0, 0, 0, 0,
3833 0, 0, 0, 0, 0, 0,
3834 0, 0, 6, 1, 0, 0,
3835 0, 0, 5, 2, 0, 0,
3836 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003837
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003838 0, 0, 0, 0, 0, 0,
3839 0, 0, 0, 0, 0, 0,
3840 0, 0, 0, 0, 0, 0,
3841 0, 0, 0, 0, 0, 0,
3842 0, 0, 0, 0, 0, 0
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003843
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003844 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003845
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003846 auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003847
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003848 LayerTestResult<T, 3> result(outputTensorInfo);
3849 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003850
3851 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3852 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3853
3854 armnn::PadQueueDescriptor descriptor;
3855
3856 std::vector<std::pair<unsigned int, unsigned int>> PadList;
3857 PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
3858 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
3859 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
3860
3861 descriptor.m_Parameters.m_PadList = PadList;
3862 armnn::WorkloadInfo info;
3863
3864 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3865 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3866
3867 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
3868
3869 inputHandle->Allocate();
3870 outputHandle->Allocate();
3871
3872 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
3873
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003874 workload->Execute();
3875
3876 CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
3877
3878 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003879}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003880
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003881template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003882LayerTestResult<T, 4> Pad4dTestCommon(
3883 armnn::IWorkloadFactory& workloadFactory,
3884 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3885 float qScale,
3886 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003887{
3888 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
3889 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
3890
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003891 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
3892 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003893
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003894 std::vector<T> inputValues(
3895 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003896 {
3897 // Batch 0, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003898 0, 1,
3899 2, 3,
3900 4, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003901
3902 // Batch 0, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003903 6, 7,
3904 8, 9,
3905 10, 11,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003906
3907 // Batch 1, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003908 12, 13,
3909 14, 15,
3910 16, 17,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003911
3912 // Batch 1, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003913 18, 19,
3914 20, 21,
3915 22, 23
3916 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003917
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003918 std::vector<T> expectedOutputValues(
3919 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003920 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003921 0, 0, 0, 0,
3922 0, 0, 0, 0,
3923 0, 0, 0, 0,
3924 0, 0, 0, 0,
3925 0, 0, 0, 0,
3926 0, 0, 0, 0,
3927 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003928
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003929 0, 0, 0, 0,
3930 0, 0, 0, 0,
3931 0, 0, 0, 0,
3932 0, 0, 0, 0,
3933 0, 0, 0, 0,
3934 0, 0, 0, 0,
3935 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003936
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003937 0, 0, 0, 0,
3938 0, 0, 0, 0,
3939 0, 0, 0, 0,
3940 0, 0, 0, 0,
3941 0, 0, 0, 0,
3942 0, 0, 0, 0,
3943 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003944
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003945 0, 0, 0, 0,
3946 0, 0, 0, 0,
3947 0, 0, 0, 0,
3948 0, 0, 0, 0,
3949 0, 0, 0, 0,
3950 0, 0, 0, 0,
3951 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003952
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003953 0, 0, 0, 0,
3954 0, 0, 0, 0,
3955 0, 0, 0, 0,
3956 0, 0, 0, 0,
3957 0, 0, 0, 0,
3958 0, 0, 0, 0,
3959 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003960
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003961 0, 0, 0, 0,
3962 0, 0, 0, 0,
3963 0, 0, 0, 0,
3964 0, 0, 0, 0,
3965 0, 0, 0, 0,
3966 0, 0, 0, 0,
3967 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003968
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003969 0, 0, 0, 0,
3970 0, 0, 0, 0,
3971 0, 0, 0, 0,
3972 0, 0, 0, 0,
3973 0, 0, 0, 0,
3974 0, 0, 0, 0,
3975 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003976
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003977 0, 0, 0, 0,
3978 0, 0, 0, 0,
3979 0, 0, 0, 0,
3980 0, 0, 1, 0,
3981 0, 2, 3, 0,
3982 0, 4, 5, 0,
3983 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003984
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003985 0, 0, 0, 0,
3986 0, 0, 0, 0,
3987 0, 0, 0, 0,
3988 0, 6, 7, 0,
3989 0, 8, 9, 0,
3990 0, 10, 11, 0,
3991 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003992
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003993 0, 0, 0, 0,
3994 0, 0, 0, 0,
3995 0, 0, 0, 0,
3996 0, 0, 0, 0,
3997 0, 0, 0, 0,
3998 0, 0, 0, 0,
3999 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004000
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004001 0, 0, 0, 0,
4002 0, 0, 0, 0,
4003 0, 0, 0, 0,
4004 0, 0, 0, 0,
4005 0, 0, 0, 0,
4006 0, 0, 0, 0,
4007 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004008
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004009 0, 0, 0, 0,
4010 0, 0, 0, 0,
4011 0, 0, 0, 0,
4012 0, 0, 0, 0,
4013 0, 0, 0, 0,
4014 0, 0, 0, 0,
4015 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004016
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004017 0, 0, 0, 0,
4018 0, 0, 0, 0,
4019 0, 0, 0, 0,
4020 0, 12, 13, 0,
4021 0, 14, 15, 0,
4022 0, 16, 17, 0,
4023 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004024
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004025 0, 0, 0, 0,
4026 0, 0, 0, 0,
4027 0, 0, 0, 0,
4028 0, 18, 19, 0,
4029 0, 20, 21, 0,
4030 0, 22, 23, 0,
4031 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004032
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004033 0, 0, 0, 0,
4034 0, 0, 0, 0,
4035 0, 0, 0, 0,
4036 0, 0, 0, 0,
4037 0, 0, 0, 0,
4038 0, 0, 0, 0,
4039 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004040
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004041 0, 0, 0, 0,
4042 0, 0, 0, 0,
4043 0, 0, 0, 0,
4044 0, 0, 0, 0,
4045 0, 0, 0, 0,
4046 0, 0, 0, 0,
4047 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004048
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004049 0, 0, 0, 0,
4050 0, 0, 0, 0,
4051 0, 0, 0, 0,
4052 0, 0, 0, 0,
4053 0, 0, 0, 0,
4054 0, 0, 0, 0,
4055 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004056
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004057 0, 0, 0, 0,
4058 0, 0, 0, 0,
4059 0, 0, 0, 0,
4060 0, 0, 0, 0,
4061 0, 0, 0, 0,
4062 0, 0, 0, 0,
4063 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004064
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004065 0, 0, 0, 0,
4066 0, 0, 0, 0,
4067 0, 0, 0, 0,
4068 0, 0, 0, 0,
4069 0, 0, 0, 0,
4070 0, 0, 0, 0,
4071 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004072
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004073 0, 0, 0, 0,
4074 0, 0, 0, 0,
4075 0, 0, 0, 0,
4076 0, 0, 0, 0,
4077 0, 0, 0, 0,
4078 0, 0, 0, 0,
4079 0, 0, 0, 0
4080 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004081
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004082 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004083
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004084 LayerTestResult<T, 4> result(outputTensorInfo);
4085 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004086
4087 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4088 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4089
4090 armnn::PadQueueDescriptor descriptor;
4091
4092 std::vector<std::pair<unsigned int, unsigned int>> PadList;
4093 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
4094 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
4095 PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
4096 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
4097
4098 descriptor.m_Parameters.m_PadList = PadList;
4099 armnn::WorkloadInfo info;
4100
4101 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4102 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4103
4104 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
4105
4106 inputHandle->Allocate();
4107 outputHandle->Allocate();
4108
4109 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
4110
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004111 workload->Execute();
4112
4113 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4114
4115 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004116}
4117
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004118LayerTestResult<uint8_t, 2> PadUint82dTest(
4119 armnn::IWorkloadFactory& workloadFactory,
4120 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004121{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004122 return Pad2dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004123}
4124
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004125LayerTestResult<uint8_t, 3> PadUint83dTest(
4126 armnn::IWorkloadFactory& workloadFactory,
4127 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004128{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004129 return Pad3dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004130}
4131
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004132LayerTestResult<uint8_t, 4> PadUint84dTest(
4133 armnn::IWorkloadFactory& workloadFactory,
4134 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004135{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004136 return Pad4dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004137}
4138
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004139LayerTestResult<float, 2> PadFloat322dTest(
4140 armnn::IWorkloadFactory& workloadFactory,
4141 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004142{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004143 return Pad2dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004144}
4145
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004146LayerTestResult<float, 3> PadFloat323dTest(
4147 armnn::IWorkloadFactory& workloadFactory,
4148 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004149{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004150 return Pad3dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004151}
4152
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004153LayerTestResult<float, 4> PadFloat324dTest(
4154 armnn::IWorkloadFactory& workloadFactory,
4155 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004156{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004157 return Pad4dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004158}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004159
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004160LayerTestResult<float, 4> L2Normalization1dTest(
4161 armnn::IWorkloadFactory& workloadFactory,
4162 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4163 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +00004164{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004165 // Width: 1
4166 // Height: 1
4167 // Channels: 10
4168 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00004169 unsigned int numberOfBatches = 1;
4170 unsigned int numberOfChannels = 10;
4171 unsigned int height = 1;
4172 unsigned int width = 1;
telsoa014fcda012018-03-09 14:13:49 +00004173
jimfly013aab7c32018-11-12 13:32:08 +00004174
4175 const armnn::TensorShape inputOutputShape = GetTestTensorShape(
4176 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004177 std::vector<float> inputValues
4178 {
4179 // Batch 0, Channel 0, Height (1) x Width (1)
4180 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00004181
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004182 // Batch 0, Channel 1, Height (1) x Width (1)
4183 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00004184
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004185 // Batch 0, Channel 2, Height (1) x Width (1)
4186 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00004187
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004188 // Batch 0, Channel 3, Height (1) x Width (1)
4189 4.0f,
4190
4191 // Batch 0, Channel 4, Height (1) x Width (1)
4192 5.0f,
4193
4194 // Batch 0, Channel 5, Height (1) x Width (1)
4195 6.0f,
4196
4197 // Batch 0, Channel 6, Height (1) x Width (1)
4198 7.0f,
4199
4200 // Batch 0, Channel 7, Height (1) x Width (1)
4201 8.0f,
4202
4203 // Batch 0, Channel 8, Height (1) x Width (1)
4204 9.0f,
4205
4206 // Batch 0, Channel 9, Height (1) x Width (1)
4207 10.0f
4208 };
telsoa014fcda012018-03-09 14:13:49 +00004209 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004210 std::vector<float> expectedOutputValues
4211 {
4212 // Batch 0, Channel 0, Height (1) x Width (1)
telsoa014fcda012018-03-09 14:13:49 +00004213 1.0f * approxInvL2Norm,
4214 2.0f * approxInvL2Norm,
4215 3.0f * approxInvL2Norm,
4216 4.0f * approxInvL2Norm,
4217 5.0f * approxInvL2Norm,
4218 6.0f * approxInvL2Norm,
4219 7.0f * approxInvL2Norm,
4220 8.0f * approxInvL2Norm,
4221 9.0f * approxInvL2Norm,
4222 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004223 };
telsoa014fcda012018-03-09 14:13:49 +00004224
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004225
4226 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00004227 inputValues, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00004228}
4229
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004230LayerTestResult<float, 4> L2Normalization2dTest(
4231 armnn::IWorkloadFactory& workloadFactory,
4232 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4233 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +00004234{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004235 // Width: 5
4236 // Height: 1
4237 // Channels: 2
4238 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00004239 unsigned int numberOfBatches = 1;
4240 unsigned int numberOfChannels = 2;
4241 unsigned int height = 1;
4242 unsigned int width = 5;
telsoa014fcda012018-03-09 14:13:49 +00004243
jimfly013aab7c32018-11-12 13:32:08 +00004244 const armnn::TensorShape inputOutputShape = GetTestTensorShape(
4245 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004246 std::vector<float> inputValues
4247 {
4248 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00004249 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00004250
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004251 // Batch 0, Channel 1, Height (1) x Width (5)
4252 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
4253 };
4254 std::vector<float> expectedOutputValues
4255 {
4256 // Batch 0, Channel 0, Height (1) x Width (5)
4257 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
4258 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
4259 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
4260 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00004261 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
4262
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004263 // Batch 0, Channel 1, Height (1) x Width (5)
4264 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
4265 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
4266 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
4267 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00004268 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004269 };
telsoa014fcda012018-03-09 14:13:49 +00004270
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004271 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00004272 inputValues, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004273}
telsoa014fcda012018-03-09 14:13:49 +00004274
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004275LayerTestResult<float, 4> L2Normalization3dTest(
4276 armnn::IWorkloadFactory& workloadFactory,
4277 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4278 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +00004279{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004280 // Width: 3
4281 // Height: 4
4282 // Channels: 2
4283 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00004284 unsigned int numberOfBatches = 1;
4285 unsigned int numberOfChannels = 2;
4286 unsigned int height = 4;
4287 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00004288
jimfly013aab7c32018-11-12 13:32:08 +00004289 const armnn::TensorShape inputOutputShape = GetTestTensorShape(
4290 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004291 std::vector<float> inputValues
4292 {
4293 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004294 119.0f, 21.0f, 150.0f,
4295 149.0f, 32.0f, 179.0f,
4296 15.0f, 227.0f, 141.0f,
4297 147.0f, 199.0f, 220.0f,
4298
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004299 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004300 110.0f, 140.0f, 73.0f,
4301 211.0f, 212.0f, 89.0f,
4302 24.0f, 138.0f, 188.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004303 162.0f, 12.0f, 161.0f
4304 };
4305 std::vector<float> expectedOutputValues
4306 {
4307 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004308 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4309 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4310 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4311 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4312 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4313 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4314 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4315 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4316 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4317 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4318 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
4319 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
4320
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004321 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004322 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4323 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4324 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4325 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4326 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4327 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4328 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4329 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4330 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4331 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4332 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004333 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
4334 };
telsoa014fcda012018-03-09 14:13:49 +00004335
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004336 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00004337 inputValues, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004338}
telsoa014fcda012018-03-09 14:13:49 +00004339
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004340LayerTestResult<float, 4> L2Normalization4dTest(
4341 armnn::IWorkloadFactory& workloadFactory,
4342 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4343 const armnn::DataLayoutIndexed& layout)
telsoa014fcda012018-03-09 14:13:49 +00004344{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004345 // Width: 3
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004346 // Height: 4
4347 // Channels: 3
4348 // BatchSize: 2
jimfly013aab7c32018-11-12 13:32:08 +00004349 unsigned int numberOfBatches = 2;
4350 unsigned int numberOfChannels = 3;
4351 unsigned int height = 4;
4352 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00004353
jimfly013aab7c32018-11-12 13:32:08 +00004354 const armnn::TensorShape inputOutputShape = GetTestTensorShape(
4355 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004356 std::vector<float> inputValues
4357 {
4358 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004359 235.0f, 46.0f, 178.0f,
4360 100.0f, 123.0f, 19.0f,
4361 172.0f, 74.0f, 250.0f,
4362 6.0f, 195.0f, 80.0f,
4363
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004364 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004365 113.0f, 95.0f, 202.0f,
4366 77.0f, 114.0f, 71.0f,
4367 122.0f, 246.0f, 166.0f,
4368 82.0f, 28.0f, 37.0f,
4369
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004370 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004371 56.0f, 170.0f, 162.0f,
4372 194.0f, 89.0f, 254.0f,
4373 12.0f, 209.0f, 200.0f,
4374 1.0f, 64.0f, 54.0f,
4375
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004376 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004377 67.0f, 90.0f, 49.0f,
4378 7.0f, 163.0f, 18.0f,
4379 25.0f, 117.0f, 103.0f,
4380 247.0f, 59.0f, 189.0f,
4381
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004382 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004383 239.0f, 104.0f, 199.0f,
4384 17.0f, 124.0f, 153.0f,
4385 222.0f, 217.0f, 75.0f,
4386 32.0f, 126.0f, 21.0f,
4387
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004388 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004389 97.0f, 145.0f, 215.0f,
4390 115.0f, 116.0f, 238.0f,
4391 226.0f, 16.0f, 132.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004392 92.0f, 125.0f, 88.0f
4393 };
4394 std::vector<float> expectedOutputValues
4395 {
4396 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004397 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4398 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4399 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
4400 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4401 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4402 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4403 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4404 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4405 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4406 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4407 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4408 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4409
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004410 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004411 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4412 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4413 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
4414 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4415 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4416 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4417 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4418 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4419 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4420 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4421 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4422 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4423
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004424 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004425 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4426 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4427 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
4428 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4429 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4430 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4431 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4432 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4433 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4434 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4435 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4436 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4437
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004438 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004439 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4440 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4441 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4442 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4443 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4444 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4445 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4446 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4447 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4448 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4449 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4450 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
4451
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004452 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004453 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4454 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4455 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4456 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4457 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4458 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4459 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4460 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4461 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4462 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4463 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4464 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
4465
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004466 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004467 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4468 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4469 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4470 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4471 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4472 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4473 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4474 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4475 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4476 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4477 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004478 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
4479 };
telsoa014fcda012018-03-09 14:13:49 +00004480
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004481 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00004482 inputValues, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00004483}
4484
// Tests the Constant layer: a workload with no inputs that emits a fixed
// tensor. The expected output is simply the tensor baked into the layer.
// @param workloadFactory  backend factory used to create handles and the workload.
// @param memoryManager    backend memory manager (passed through by callers).
// @param qScale, qOffset  quantization parameters, applied only for quantized T.
template <typename T>
LayerTestResult<T, 4> ConstantTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;

    // Constant layers copy their payload through unchanged, so the output
    // dimensions mirror the input dimensions exactly.
    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // Tensors are laid out NCHW: { batch, channels, height, width }.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::GetDataType<T>());

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    // QuantizedVector converts the float payload to T using qScale/qOffset
    // (a pass-through for float types).
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f,
    })));

    // The constant payload is the input itself, so the expected output is too.
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // The Constant layer owns its payload via a ScopedCpuTensorHandle attached
    // to the queue descriptor (no input handles exist for this layer type).
    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;

    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);

    outputHandle->Allocate();

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
4579
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004580LayerTestResult<float, 4> ConstantTest(
4581 armnn::IWorkloadFactory& workloadFactory,
4582 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004583{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004584 return ConstantTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004585}
4586
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004587LayerTestResult<uint8_t, 4> ConstantTestUint8(
4588 armnn::IWorkloadFactory& workloadFactory,
4589 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004590{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004591 return ConstantTestImpl<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004592}
4593
// Tests the Merger (concatenation) layer on QAsymm8 data: two CHW tensors of
// 2 and 1 channels are merged along the channel axis into a 3-channel output.
// When the backend supports sub-tensors, the inputs are created as views
// directly into the output tensor at the given window origins.
LayerTestResult<uint8_t, 3> MergerUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    // First input occupies channels 0-1 of the output.
    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    // Second input occupies channel 2 of the output.
    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the merger operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // Expected output: input1 in channels 0-1 followed by input2 in channel 2.
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // When sub-tensors are supported, each input handle is a view into the
    // output tensor at its window origin; otherwise a standalone handle is used.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    // NOTE: the output must be allocated before the inputs — when sub-tensors
    // are in use the input handles are views into the output's storage.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
4728
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004729LayerTestResult<uint8_t, 4> AdditionUint8Test(
4730 armnn::IWorkloadFactory& workloadFactory,
4731 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004732{
4733 unsigned int batchSize = 1;
4734 unsigned int channels = 2;
4735 unsigned int height = 2;
4736 unsigned int width = 3;
4737
4738 const float scale = 7.0f;
4739 const int32_t offset = 3;
4740
4741 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
4742 armnn::TensorInfo outputTensorInfo;
4743
4744 const unsigned int shape[] = { batchSize, channels, height, width };
4745 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
4746 inputTensorInfo1.SetQuantizationScale(scale);
4747 inputTensorInfo1.SetQuantizationOffset(offset);
4748
4749 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
4750 inputTensorInfo2.SetQuantizationScale(scale);
4751 inputTensorInfo2.SetQuantizationOffset(offset);
4752
4753 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
4754 outputTensorInfo.SetQuantizationScale(scale);
4755 outputTensorInfo.SetQuantizationOffset(offset);
4756
telsoa01c577f2c2018-08-31 09:22:23 +01004757 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00004758 auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
4759 {
4760 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
4761 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
4762 }));
4763
telsoa01c577f2c2018-08-31 09:22:23 +01004764 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00004765 auto input2 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
4766 {
4767 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
4768 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
4769 }));
4770
telsoa01c577f2c2018-08-31 09:22:23 +01004771 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00004772 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
4773 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>(
4774 {
4775 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
4776 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
4777 }));
4778
4779 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
4780 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
4781 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4782
4783 armnn::AdditionQueueDescriptor data;
4784 armnn::WorkloadInfo info;
4785 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
4786 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
4787 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
4788
4789 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
4790
4791 inputHandle1->Allocate();
4792 inputHandle2->Allocate();
4793 outputHandle->Allocate();
4794
4795 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
4796 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
4797
4798 workload->Execute();
4799
4800 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4801
4802 return result;
4803}
4804
namespace
{
// Shared driver for the QAsymm8 Multiplication tests: builds two input tensors
// (each with its own quantization scale/offset), runs a Multiplication
// workload, and returns the actual vs expected output for comparison.
// Note: memoryManager is accepted for signature consistency with the other
// test helpers but is not used in this function body.
LayerTestResult<uint8_t, 4> MultiplicationUint8TestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<uint8_t> & values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<uint8_t> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<uint8_t> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1(4, shape1, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo outputTensorInfo(4, outShape, armnn::DataType::QuantisedAsymm8);

    // Each tensor carries its own quantization parameters supplied by the caller.
    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<uint8_t, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, values1);

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
4868
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004869LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
4870 armnn::IWorkloadFactory& workloadFactory,
4871 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01004872{
4873 unsigned int batchSize = 1;
4874 unsigned int channels = 2;
4875 unsigned int height = 2;
4876 unsigned int width = 3;
4877 const unsigned int shape[] = { batchSize, channels, height, width };
4878
telsoa01c577f2c2018-08-31 09:22:23 +01004879 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01004880 std::vector<uint8_t> input0({
4881 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
4882 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
4883 });
4884
telsoa01c577f2c2018-08-31 09:22:23 +01004885 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01004886 std::vector<uint8_t> input1({
4887 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
4888 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
4889 });
4890
telsoa01c577f2c2018-08-31 09:22:23 +01004891 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01004892 std::vector<uint8_t> output(
4893 {
4894 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
4895 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
4896 });
4897
4898 return MultiplicationUint8TestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004899 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01004900 shape,
4901 input0,
4902 4.0f,
4903 1,
4904 shape,
4905 input1,
4906 3.0f,
4907 -2,
4908 shape,
4909 output,
telsoa01c577f2c2018-08-31 09:22:23 +01004910 1366.255f, // Scale/offset chosen to have output values out of range.
surmeh01bceff2f2018-03-29 16:29:27 +01004911 -5);
4912}
4913
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004914LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
4915 armnn::IWorkloadFactory& workloadFactory,
4916 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01004917{
4918 const unsigned int shape0[] = { 1, 2, 2, 3 };
4919 const unsigned int shape1[] = { 1, 1, 1, 1 };
4920
4921 std::vector<uint8_t> input0({
4922 1, 2, 3, 4, 5, 6,
4923 7, 8, 9, 10, 11, 12
4924 });
4925
4926 std::vector<uint8_t> input1({2});
4927
4928 std::vector<uint8_t> output({
4929 2, 4, 6, 8, 10, 12,
4930 14, 16, 18, 20, 22, 24
4931 });
4932
4933 return MultiplicationUint8TestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004934 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01004935 shape0,
4936 input0,
4937 1.0f,
4938 0,
4939 shape1,
4940 input1,
4941 1.0f,
4942 0,
4943 shape0,
4944 output,
4945 1.0f,
4946 0);
4947}
4948
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004949LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
4950 armnn::IWorkloadFactory& workloadFactory,
4951 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01004952{
4953 const unsigned int shape0[] = { 1, 2, 2, 3 };
4954 const unsigned int shape1[] = { 1, 1, 1, 3 };
4955
4956 std::vector<uint8_t> input0({
4957 1, 2, 3, 4, 5, 6,
4958 7, 8, 9, 10, 11, 12
4959 });
4960
4961 std::vector<uint8_t> input1({1, 2, 3});
4962
4963 std::vector<uint8_t> output({
4964 1, 4, 9, 4, 10, 18,
4965 7, 16, 27, 10, 22, 36
4966 });
4967
4968 return MultiplicationUint8TestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004969 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01004970 shape0,
4971 input0,
4972 1.0f,
4973 0,
4974 shape1,
4975 input1,
4976 1.0f,
4977 0,
4978 shape0,
4979 output,
4980 1.0f,
4981 0);
4982}
telsoa014fcda012018-03-09 14:13:49 +00004983
namespace
{
// Shared driver for the Subtraction tests. Works for both Float32 and QAsymm8:
// the data type is selected from T (uint8_t -> QuantisedAsymm8, otherwise
// Float32). Builds two inputs with independent quantization parameters, runs a
// Subtraction workload, and returns actual vs expected output.
// Note: memoryManager is accepted for signature consistency with the other
// test helpers but is not used in this function body.
template <typename T>
LayerTestResult<T, 4> SubtractionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    // Map the element type to the matching armnn data type.
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    // Quantization parameters are set unconditionally; they are ignored for
    // Float32 tensors.
    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
5052
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005053LayerTestResult<uint8_t, 4> SubtractionUint8Test(
5054 armnn::IWorkloadFactory& workloadFactory,
5055 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01005056{
5057 const unsigned int shape0[] = { 1, 1, 2, 2 };
5058 const unsigned int shape1[] = { 1, 1, 2, 2 };
5059
5060 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
5061 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
5062 std::vector<uint8_t> output({ 3, 3, 5, 5 });
5063
5064 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005065 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01005066 shape0, input0, 0.5f, 2,
5067 shape1, input1, 1.0f, 0,
5068 shape0, output, 1.0f, 0);
5069}
5070
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005071LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
5072 armnn::IWorkloadFactory& workloadFactory,
5073 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01005074{
5075 const unsigned int shape0[] = { 1, 1, 2, 2 };
5076 const unsigned int shape1[] = { 1, 1, 1, 1 };
5077
5078 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
5079 std::vector<uint8_t> input1({ 2 });
5080 std::vector<uint8_t> output({ 5, 6, 7, 8 });
5081
5082 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005083 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01005084 shape0, input0, 0.5f, 2,
5085 shape1, input1, 1.0f, 0,
5086 shape0, output, 1.0f, 3);
5087}
5088
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005089LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
5090 armnn::IWorkloadFactory& workloadFactory,
5091 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01005092{
5093 const unsigned int shape0[] = { 1, 1, 2, 2 };
5094 const unsigned int shape1[] = { 1, 1, 2, 1 };
5095
5096 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
5097 std::vector<uint8_t> input1({ 2, 1 });
5098 std::vector<uint8_t> output({ 8, 11, 12, 15 });
5099
5100 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005101 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01005102 shape0, input0, 1.0f, 0,
5103 shape1, input1, 1.0f, 0,
5104 shape0, output, 1.0f, 0);
5105}
5106
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005107LayerTestResult<float, 4> SubtractionTest(
5108 armnn::IWorkloadFactory& workloadFactory,
5109 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01005110{
5111 const unsigned int shape0[] = { 1, 1, 2, 2 };
5112 const unsigned int shape1[] = { 1, 1, 2, 2 };
5113
5114 std::vector<float> input0({ 1, 2, 3, 4 });
5115 std::vector<float> input1({ 1, -1, 0, 2 });
5116 std::vector<float> output({ 0, 3, 3, 2 });
5117
5118 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005119 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01005120 shape0, input0, 1.0f, 0,
5121 shape1, input1, 1.0f, 0,
5122 shape0, output, 1.0f, 0);
5123}
5124
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005125LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
5126 armnn::IWorkloadFactory& workloadFactory,
5127 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01005128{
5129 const unsigned int shape0[] = { 1, 1, 2, 2 };
5130 const unsigned int shape1[] = { 1, 1, 1, 1 };
5131
5132 std::vector<float> input0({ 1, 2, 3, 4 });
5133 std::vector<float> input1({ 10 });
5134 std::vector<float> output({ -9, -8, -7, -6 });
5135
5136 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005137 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01005138 shape0, input0, 1.0f, 0,
5139 shape1, input1, 1.0f, 0,
5140 shape0, output, 1.0f, 0);
5141}
5142
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005143LayerTestResult<float, 4> SubtractionBroadcastTest(
5144 armnn::IWorkloadFactory& workloadFactory,
5145 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01005146{
5147 const unsigned int shape0[] = { 1, 1, 2, 2 };
5148 const unsigned int shape1[] = { 1, 1, 1, 2 };
5149
5150 std::vector<float> input0({ 1, 2, 3, 4 });
5151 std::vector<float> input1({ 10, -5 });
5152 std::vector<float> output({ -9, 7, -7, 9 });
5153
5154 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005155 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01005156 shape0, input0, 1.0f, 0,
5157 shape1, input1, 1.0f, 0,
5158 shape0, output, 1.0f, 0);
5159}
5160
// Tests ResizeBilinear on QAsymm8 data where the output size equals the input
// size, so the operation is a no-op and the output must equal the input.
LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    // Identical output dimensions make the resize a pass-through.
    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-3);

    // Output uses the same quantization as the input.
    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    // No-op resize: expected output is the input itself.
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
5214
// Tests ResizeBilinear on QAsymm8 data downscaling a 2x2 image to 1x1. Because
// the implementation samples from the top-left corner of each output texel,
// the single output element is the input's (0,0) value rather than an average.
LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    // Downscale by a factor of two in each spatial dimension (2x2 -> 1x1).
    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.1567f);
    inputTensorInfo.SetQuantizationOffset(1);

    // Output uses the same quantization as the input.
    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.1567f);
    outputTensorInfo.SetQuantizationOffset(1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 255,
        200, 250
    }));

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
    // the centre).
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
5273
// Tests ResizeBilinear minification of a square 4x4 QAsymm8 tensor down to 2x2 (NCHW layout).
// Output texels are projected by their top-left corner, so each output value samples the
// corresponding top-left input texel rather than an average of the 2x2 neighbourhood.
// NOTE(review): memoryManager is accepted for signature consistency but not used here —
// tensor handles come straight from the workload factory; confirm this is intentional.
LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    // Halve both spatial dimensions; batch and channel counts are preserved.
    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // Input and output share the same quantization parameters, so quantized values
    // can be compared directly.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(3.141592f);
    inputTensorInfo.SetQuantizationOffset(3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(3.141592f);
    outputTensorInfo.SetQuantizationOffset(3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    // Expected: the top-left value of each 2x2 input block (corner projection, no averaging).
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3,
        3, 5
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    // Handles must be allocated before data is copied in and the workload executes.
    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
5330
// Tests ResizeBilinear minification of a non-square 3x2 QAsymm8 tensor down to 2x1 (NCHW),
// exercising a non-integer scale factor. The inline comments next to the quantized values
// give their dequantized equivalents (scale 1.5, offset -1).
// NOTE(review): memoryManager is accepted but unused — handles come from the workload
// factory directly; confirm this is intentional.
LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 2;
    constexpr unsigned int outputHeight = 1;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, // 3.0, 4.5, 6.0
        5, 8, 13 // 9.0, 13.5, 21.0
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3 // 3.0, 5.25
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    // Allocate handles first, then upload input data, then run the workload.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
5385
// Tests ResizeBilinear magnification of a 2x3 QAsymm8 tensor up to 5x3 (NCHW).
// Unlike the minification tests, input and output use *different* quantization
// parameters, so the workload must dequantize, interpolate, and requantize.
// Inline comments give the dequantized value of each quantized entry.
// NOTE(review): memoryManager is accepted but unused — handles come from the
// workload factory directly; confirm this is intentional.
LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 3;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    // Width is magnified 2 -> 5; height is unchanged.
    constexpr unsigned int outputWidth = 5;
    constexpr unsigned int outputHeight = 3;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.010765f);
    inputTensorInfo.SetQuantizationOffset(7);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.010132f);
    outputTensorInfo.SetQuantizationOffset(-18);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        24, 228, // 0.183005, 2.379065,
        105, 128, // 1.05497, 1.302565
        230, 71 // 2.400595, 0.68896
    }));

    // Expected values: linear interpolation along the width, clamped at the right
    // edge (hence the repeated last column), requantized with the output params.
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        0, 87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
        86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
        219, 151, 84, 50, 50 // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    // Allocate handles first, then upload input data, then run the workload.
    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
5442
// Tests float32 batch normalization in NCHW layout on a 1x2x3x2 tensor.
// Mean/variance/beta/gamma are chosen inside BatchNormTestImpl such that channel 0
// passes through unchanged while channel 1 is shifted (see expected values).
// Quantization scale/offset of 0.f/0 are passed because the data type is float.
LayerTestResult<float, 4> BatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<float>(workloadFactory, memoryManager,
                                    inputOutputShape, inputValues, expectedOutputValues,
                                    0.f, 0, armnn::DataLayout::NCHW);
}
5482
// Tests float32 batch normalization in NHWC layout on a 1x3x2x2 tensor.
// Same data as BatchNormTest but laid out channels-last, so it validates the
// NHWC code path produces identical results.
LayerTestResult<float, 4> BatchNormNhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<float>(workloadFactory, memoryManager,
                                    inputOutputShape, inputValues, expectedOutputValues,
                                    0.f, 0, armnn::DataLayout::NHWC);
}
5526
// Tests QAsymm8 batch normalization in NCHW layout on a 1x2x3x2 tensor.
// Same float reference data as BatchNormTest; the impl quantizes it with
// scale 1/20 and offset 50 before running the uint8 workload.
LayerTestResult<uint8_t, 4> BatchNormUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<uint8_t>(workloadFactory, memoryManager,
                                      inputOutputShape, inputValues, expectedOutputValues,
                                      1.f/20.f, 50, armnn::DataLayout::NCHW);
}
5566
// Tests QAsymm8 batch normalization in NHWC layout on a 1x3x2x2 tensor.
// Channels-last variant of BatchNormUint8Test; quantization scale 1/20, offset 50.
LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<uint8_t>(workloadFactory, memoryManager,
                                      inputOutputShape, inputValues, expectedOutputValues,
                                      1.f/20.f, 50, armnn::DataLayout::NHWC);
}
5610
// Thin wrapper: runs the shared Constant-layer test with QAsymm8 data
// (quantization scale 2e-6, offset 1).
LayerTestResult<uint8_t, 4> ConstantUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantTestImpl<uint8_t>(workloadFactory, memoryManager, 2e-6f, 1);
}
5617
// Thin wrapper: 1D concatenation with QAsymm8 data (scale 0.5, offset -1).
LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation1dTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
}
5624
// Thin wrapper: 2D concatenation along dimension 0, QAsymm8 (scale 0.5, offset -1).
LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation2dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
}
5631
// Thin wrapper: 2D concatenation along dimension 1, QAsymm8 (scale 0.5, offset -1).
LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation2dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
}
5638
// Thin wrapper: 2D concatenation along dimension 0 with differently-sized inputs,
// QAsymm8 (scale 0.5, offset -1).
LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation2dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
}
5645
// Thin wrapper: 2D concatenation along dimension 1 with differently-sized inputs,
// QAsymm8 (scale 0.5, offset -1).
LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation2dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
}
5652
// Thin wrapper: 3D concatenation along dimension 0, QAsymm8 (scale 0.5, offset -1).
LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
}
5659
// Thin wrapper: 3D concatenation along dimension 1, QAsymm8 (scale 0.5, offset -1).
LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation3dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
}
5666
// Thin wrapper: 3D concatenation along dimension 2, QAsymm8 (scale 0.5, offset -1).
LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation3dDim2TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
}
5673
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005674LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
5675 armnn::IWorkloadFactory& workloadFactory,
5676 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005677{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005678 return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00005679}
5680
// Thin wrapper: 3D concatenation along dimension 1 with differently-sized inputs,
// QAsymm8 (scale 0.5, offset -1).
LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation3dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
}
5687
// Thin wrapper: 3D concatenation along dimension 2 with differently-sized inputs,
// QAsymm8 (scale 0.5, offset -1).
LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation3dDim2DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
}
5694
// Thin wrapper: max pooling, 2x2 kernel / 2x2 stride, float32.
// forceNoPadding toggles the padding behaviour exercised by the common impl.
LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<float>(workloadFactory, memoryManager, forceNoPadding);
}
5702
// Thin wrapper: max pooling, 2x2 kernel / 2x2 stride, QAsymm8 (scale 3.0, offset -5).
LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<uint8_t>(
        workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
}
5711
// Thin wrapper: max pooling, 3x3 kernel / 2x4 stride (asymmetric), float32.
LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<float>(workloadFactory, memoryManager, forceNoPadding);
}
5719
// Thin wrapper: max pooling, 3x3 kernel / 2x4 stride, QAsymm8 (scale 0.1, offset 128).
LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<uint8_t>(
        workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
}
5728
// Thin wrapper: basic max pooling, float32, parameterised on data layout (NCHW/NHWC).
LayerTestResult<float, 4> SimpleMaxPooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayoutIndexed& dataLayout)
{
    return SimpleMaxPooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
}
5736
// Thin wrapper: basic max pooling, QAsymm8, parameterised on data layout.
LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayoutIndexed& dataLayout)
{
    return SimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, dataLayout);
}
5744
// Thin wrapper: basic average pooling, float32, parameterised on data layout.
LayerTestResult<float, 4> SimpleAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayoutIndexed& dataLayout)
{
    return SimpleAveragePooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
}
5752
// Thin wrapper: basic average pooling, QAsymm8 (scale 0.5, offset -1),
// parameterised on data layout.
LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayoutIndexed& dataLayout)
{
    return SimpleAveragePooling2dTestCommon<uint8_t>(
        workloadFactory, memoryManager, dataLayout, 0.5, -1);
}
5761
// Thin wrapper: average pooling with IgnoreValue padding, 3x2 kernel / 2x2 stride, float32.
LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<float>(
        workloadFactory, memoryManager, forceNoPadding);
}
5770
// Thin wrapper: average pooling over large tensors, float32.
LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LargeTensorsAveragePooling2dTestCommon<float>(workloadFactory, memoryManager);
}
5777
// Thin wrapper: average pooling over large tensors, QAsymm8 (scale 0.5, offset -1).
LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LargeTensorsAveragePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, 0.5, -1);
}
5784
// Thin wrapper: basic L2 pooling, float32, parameterised on data layout.
LayerTestResult<float, 4> SimpleL2Pooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayoutIndexed& dataLayout)
{
    return SimpleL2Pooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
}
5792
// Thin wrapper: basic L2 pooling, QAsymm8, parameterised on data layout.
LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayoutIndexed& dataLayout)
{
    return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, dataLayout);
}
5800
// Thin wrapper: L2 pooling, 3x3 kernel / stride 1, float32.
LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<float>(workloadFactory, memoryManager);
}
5807
// Thin wrapper: L2 pooling, 3x3 kernel / stride 1, QAsymm8.
LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<uint8_t>(workloadFactory, memoryManager);
}
5814
// Thin wrapper: L2 pooling, 3x3 kernel / stride 3, float32.
LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride3TestCommon<float>(workloadFactory, memoryManager);
}
5821
// Thin wrapper: L2 pooling, 3x3 kernel / stride 3, QAsymm8.
LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride3TestCommon<uint8_t>(workloadFactory, memoryManager);
}
5828
// Thin wrapper: L2 pooling, 3x3 kernel / stride 4, float32.
LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<float>(workloadFactory, memoryManager);
}
5835
// Thin wrapper: L2 pooling, 3x3 kernel / stride 4, QAsymm8.
LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<uint8_t>(workloadFactory, memoryManager);
}
5842
// Thin wrapper: L2 pooling, 7x7 kernel, float32.
LayerTestResult<float, 4> L2Pooling2dSize7Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<float>(workloadFactory, memoryManager);
}
5849
// Thin wrapper: L2 pooling, 7x7 kernel, QAsymm8.
LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<uint8_t>(workloadFactory, memoryManager);
}
5856
// Thin wrapper: L2 pooling, 9x9 kernel, float32.
LayerTestResult<float, 4> L2Pooling2dSize9Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<float>(workloadFactory, memoryManager);
}
5863
// Thin wrapper: L2 pooling, 9x9 kernel, QAsymm8.
LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<uint8_t>(workloadFactory, memoryManager);
}
5870
// Thin wrapper: pooling with a non-square kernel and asymmetric padding, float32.
LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<float>(workloadFactory, memoryManager);
}
5877
// Thin wrapper: pooling with a non-square kernel and asymmetric padding, QAsymm8.
LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
}
5884
// Thin wrapper: runs the given pooling algorithm on the backend under test and on
// refWorkloadFactory (the reference backend), comparing results; float32 data.
LayerTestResult<float, 4> ComparePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<float>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType);
}
5894
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005895LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
5896 armnn::IWorkloadFactory& workloadFactory,
5897 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5898 armnn::IWorkloadFactory& refWorkloadFactory,
5899 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00005900{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005901 return ComparePooling2dTestCommon<uint8_t>(
5902 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00005903}
5904
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005905LayerTestResult<float, 2> FullyConnectedLargeTest(
5906 armnn::IWorkloadFactory& workloadFactory,
5907 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5908 bool transposeWeights)
telsoa014fcda012018-03-09 14:13:49 +00005909{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005910 return FullyConnectedLargeTestCommon<float>(workloadFactory, memoryManager, transposeWeights);
telsoa014fcda012018-03-09 14:13:49 +00005911}
5912
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005913LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
5914 armnn::IWorkloadFactory& workloadFactory,
5915 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005916{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005917 return IgnorePaddingSimpleMaxPooling2dTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005918}
5919
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005920LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
5921 armnn::IWorkloadFactory& workloadFactory,
5922 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005923{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005924 return IgnorePaddingSimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00005925}
5926
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005927LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
5928 armnn::IWorkloadFactory& workloadFactory,
5929 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005930{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005931 return IgnorePaddingMaxPooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005932}
5933
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005934LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
5935 armnn::IWorkloadFactory& workloadFactory,
5936 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005937{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005938 return IgnorePaddingMaxPooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00005939}
5940
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005941LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
5942 armnn::IWorkloadFactory& workloadFactory,
5943 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005944{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005945 return IgnorePaddingSimpleAveragePooling2dTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005946}
5947
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005948LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
5949 armnn::IWorkloadFactory& workloadFactory,
5950 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005951{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005952 return IgnorePaddingSimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005953}
5954
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005955LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
5956 armnn::IWorkloadFactory& workloadFactory,
5957 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005958{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005959 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005960}
5961
5962LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005963 armnn::IWorkloadFactory& workloadFactory,
5964 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005965{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005966 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005967}
5968
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005969LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
5970 armnn::IWorkloadFactory& workloadFactory,
5971 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005972{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005973 return IgnorePaddingAveragePooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005974}
5975
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005976LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
5977 armnn::IWorkloadFactory& workloadFactory,
5978 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005979{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005980 return IgnorePaddingAveragePooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005981}
5982
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005983LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
5984 armnn::IWorkloadFactory& workloadFactory,
5985 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005986{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005987 return IgnorePaddingSimpleL2Pooling2dTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005988}
5989
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005990LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
5991 armnn::IWorkloadFactory& workloadFactory,
5992 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005993{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005994 return IgnorePaddingSimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00005995}
5996
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005997LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
5998 armnn::IWorkloadFactory& workloadFactory,
5999 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006000{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006001 return IgnorePaddingL2Pooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006002}
6003
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006004LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
6005 armnn::IWorkloadFactory& workloadFactory,
6006 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006007{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006008 return IgnorePaddingL2Pooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006009}
6010
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006011LayerTestResult<float, 4> SimplePermuteFloat32Test(
6012 armnn::IWorkloadFactory& workloadFactory,
6013 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006014{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006015 return SimplePermuteFloat32TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006016};
6017
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006018LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(
6019 armnn::IWorkloadFactory& workloadFactory,
6020 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006021{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006022 return SimplePermuteUint8TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006023};
surmeh01bceff2f2018-03-29 16:29:27 +01006024
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006025LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(
6026 armnn::IWorkloadFactory& workloadFactory,
6027 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01006028{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006029 return PermuteFloat32ValueSet1TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01006030};
6031
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006032LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(
6033 armnn::IWorkloadFactory& workloadFactory,
6034 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01006035{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006036 return PermuteFloat32ValueSet2TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01006037};
6038
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006039LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(
6040 armnn::IWorkloadFactory& workloadFactory,
6041 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01006042{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006043 return PermuteFloat32ValueSet3TestCommon(workloadFactory, memoryManager);
narpra011e4c31d2018-09-28 11:07:51 +01006044};
6045
namespace
{

// Builds and runs a Mean workload over an InputDim-dimensional tensor, reducing over
// 'axis' (an empty vector reduces over all dimensions), and returns both the computed
// output and the caller-supplied expected output for comparison.
// 'scale' and 'offset' set identical quantization parameters on input and output.
// NOTE(review): memoryManager is currently unused by this helper — presumably kept for
// signature consistency with the other test helpers; confirm before removing.
template <typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> MeanTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int* inputShape,
    const std::vector<T>& inputData,
    const std::vector<unsigned int>& axis,
    bool keepDims,
    const unsigned int* outputShape,
    const std::vector<T>& outputData,
    float scale = 1.0f,
    int32_t offset = 0)
{
    // uint8_t exercises the quantised path; any other T is treated as Float32.
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    // Same quantization info on both tensors so quantised values compare directly.
    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the Mean workload: which axes to reduce and whether the reduced
    // dimensions are kept as size-1 entries in the output shape.
    armnn::MeanQueueDescriptor data;
    data.m_Parameters.m_Axis = axis;
    data.m_Parameters.m_KeepDims = keepDims;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);

    // Allocate device/backend buffers before any data transfer.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workload->Execute();

    // Read back the computed mean into the result structure.
    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());

    return result;
}

} // anonymous namespace
6103
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006104LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(
6105 armnn::IWorkloadFactory& workloadFactory,
6106 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01006107{
6108 const unsigned int inputShape[] = { 3, 2 };
6109 const unsigned int outputShape[] = { 1 };
6110
6111 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
6112 std::vector<uint8_t> output({ 2 });
6113
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006114 return MeanTestHelper<uint8_t, 2, 1>(
6115 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01006116}
6117
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006118LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(
6119 armnn::IWorkloadFactory& workloadFactory,
6120 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01006121{
6122 const unsigned int inputShape[] = { 1, 1, 3, 2 };
6123 const unsigned int outputShape[] = { 1, 1, 2 };
6124
6125 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
6126 std::vector<uint8_t> output({ 2, 2 });
6127
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006128 return MeanTestHelper<uint8_t, 4, 3>(
6129 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01006130}
6131
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006132LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(
6133 armnn::IWorkloadFactory& workloadFactory,
6134 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01006135{
6136 const unsigned int inputShape[] = { 1, 1, 3, 2 };
6137 const unsigned int outputShape[] = { 1, 1, 1, 2 };
6138
6139 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
6140 std::vector<uint8_t> output({ 2, 2 });
6141
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006142 return MeanTestHelper<uint8_t, 4, 4>(
6143 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01006144}
6145
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006146LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(
6147 armnn::IWorkloadFactory& workloadFactory,
6148 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01006149{
6150 const unsigned int inputShape[] = { 2, 3, 1, 2 };
6151 const unsigned int outputShape[] = { 1, 3, 1, 1 };
6152
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006153 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
narpra011e4c31d2018-09-28 11:07:51 +01006154 std::vector<uint8_t> output({ 1, 3, 5 });
6155
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006156 return MeanTestHelper<uint8_t, 4, 4>(
6157 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01006158}
6159
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006160LayerTestResult<uint8_t, 1> MeanVtsUint8Test(
6161 armnn::IWorkloadFactory& workloadFactory,
6162 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01006163{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006164 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01006165 const unsigned int outputShape[] = { 2 };
6166
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006167 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
6168 24 });
6169 std::vector<uint8_t> output({ 12, 13 });
narpra011e4c31d2018-09-28 11:07:51 +01006170
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006171 return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, memoryManager,
6172 inputShape, input, { 0, 1 }, false, outputShape,
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006173 output, 0.8f, 5);
narpra011e4c31d2018-09-28 11:07:51 +01006174}
6175
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006176LayerTestResult<float, 1> MeanFloatSimpleTest(
6177 armnn::IWorkloadFactory& workloadFactory,
6178 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01006179{
6180 const unsigned int inputShape[] = { 3, 2 };
6181 const unsigned int outputShape[] = { 1 };
6182
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006183 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
6184 std::vector<float> output({ 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01006185
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006186 return MeanTestHelper<float, 2, 1>(
6187 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01006188}
6189
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006190LayerTestResult<float, 3> MeanFloatSimpleAxisTest(
6191 armnn::IWorkloadFactory& workloadFactory,
6192 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01006193{
6194 const unsigned int inputShape[] = { 2, 3, 1, 2 };
6195 const unsigned int outputShape[] = { 3, 1, 2 };
6196
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006197 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
6198 std::vector<float> output({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
narpra011e4c31d2018-09-28 11:07:51 +01006199
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006200 return MeanTestHelper<float, 4, 3>(
6201 workloadFactory, memoryManager, inputShape, input, { 0 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01006202}
6203
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006204LayerTestResult<float, 4> MeanFloatKeepDimsTest(
6205 armnn::IWorkloadFactory& workloadFactory,
6206 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01006207{
6208 const unsigned int inputShape[] = { 1, 1, 3, 2 };
6209 const unsigned int outputShape[] = { 1, 1, 1, 2 };
6210
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006211 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
6212 std::vector<float> output({ 2.0f, 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01006213
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006214 return MeanTestHelper<float, 4, 4>(
6215 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01006216}
6217
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006218LayerTestResult<float, 4> MeanFloatMultipleDimsTest(
6219 armnn::IWorkloadFactory& workloadFactory,
6220 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01006221{
6222 const unsigned int inputShape[] = { 2, 3, 1, 2 };
6223 const unsigned int outputShape[] = { 1, 3, 1, 1 };
6224
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006225 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
6226 std::vector<float> output({ 1.5f, 3.5f, 5.5f });
narpra011e4c31d2018-09-28 11:07:51 +01006227
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006228 return MeanTestHelper<float, 4, 4>(
6229 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01006230}
6231
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006232LayerTestResult<float, 1> MeanVtsFloat1Test(
6233 armnn::IWorkloadFactory& workloadFactory,
6234 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01006235{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006236 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01006237 const unsigned int outputShape[] = { 2 };
6238
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006239 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
6240 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
6241 std::vector<float> output({ 12.0f, 13.0f });
narpra011e4c31d2018-09-28 11:07:51 +01006242
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006243 return MeanTestHelper<float, 3, 1>(
6244 workloadFactory, memoryManager, inputShape, input, { 0, 1 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01006245}
6246
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006247LayerTestResult<float, 3> MeanVtsFloat2Test(
6248 armnn::IWorkloadFactory& workloadFactory,
6249 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01006250{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006251 const unsigned int inputShape[] = { 4, 3, 2 };
6252 const unsigned int outputShape[] = { 1, 3, 1 };
narpra011e4c31d2018-09-28 11:07:51 +01006253
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006254 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
6255 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
6256 std::vector<float> output({ 10.5f, 12.5f, 14.5f });
narpra011e4c31d2018-09-28 11:07:51 +01006257
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006258 return MeanTestHelper<float, 3, 3>(
6259 workloadFactory, memoryManager, inputShape, input, { 0, 2 }, true, outputShape, output);
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006260}
6261
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006262LayerTestResult<float, 3> MeanVtsFloat3Test(
6263 armnn::IWorkloadFactory& workloadFactory,
6264 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh28dcab62018-10-19 16:40:03 +01006265{
6266 const unsigned int inputShape[] = { 1, 2, 2, 1 };
6267 const unsigned int outputShape[] = { 1, 2, 1 };
6268
6269 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f });
6270 std::vector<float> output({ 1.5f, 3.5f });
6271
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006272 return MeanTestHelper<float, 4, 3>(
6273 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01006274}
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01006275
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006276LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
6277 armnn::IWorkloadFactory& workloadFactory,
6278 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01006279{
6280 // Create Initial Tensor
6281 // 1, 2, 3
6282 // 4, 5, 6
6283 // 7, 8, 9
6284
6285 armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::GetDataType<float>());
6286 armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::GetDataType<float>());
6287
6288 boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
6289 {1, 2, 3,
6290 4, 5, 6,
6291 7, 8, 9
6292 });
6293
6294 std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
6295 workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
6296 std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
6297 workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);
6298
6299 // Apply MaxPool poolSize = 1x1, stride=2x2
6300 // Result =
6301 // 1, 3
6302 // 7, 9
6303 armnn::Pooling2dDescriptor descriptor;
6304 descriptor.m_PoolHeight = 1;
6305 descriptor.m_PoolWidth = 1;
6306 descriptor.m_StrideX = 2;
6307 descriptor.m_StrideY = 2;
6308 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
6309
6310 armnn::Pooling2dQueueDescriptor queueDescriptor;
6311 queueDescriptor.m_Parameters = descriptor;
6312 armnn::WorkloadInfo workloadInfo;
6313 AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
6314 AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
6315
6316 // Create the MaxPool
6317 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
6318
6319 //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
6320 auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
6321 boost::multi_array<float, 4> resultMaxPool;
6322 resultMaxPool.resize(shape);
6323
6324
6325 // Create addition with another tensor the same size
6326 // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
6327 // with the initial tensor.
6328 // 12, 16
6329 // 24, 28
6330
6331 armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::GetDataType<float>());
6332 armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::GetDataType<float>());
6333
6334 boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
6335 {12, 16,
6336 24, 28,
6337 });
6338
6339 // Expected output tensor after MaxPool and Addition.
6340 LayerTestResult<float,4> addRet(addOutputTensorInfo);
6341 addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
6342 {
6343 13, 19,
6344 31, 37
6345 }));
6346
6347 std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
6348 std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);
6349
6350 armnn::AdditionQueueDescriptor data;
6351 armnn::WorkloadInfo info;
6352
6353 // Add the output of the MaxPool and the new tensor
6354 AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
6355 AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
6356 AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());
6357
6358 std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);
6359
6360 poolingInputHandle->Allocate();
6361 poolingOutputHandle->Allocate();
6362 addInputHandle->Allocate();
6363 addOutputHandle->Allocate();
6364
6365 CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
6366 CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());
6367
6368 CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
6369 CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);
6370
6371 workload->Execute();
6372 addWorkload->Execute();
6373
6374 CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());
6375
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01006376 return addRet;
6377}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006378
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006379LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
6380 armnn::IWorkloadFactory& workloadFactory,
6381 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006382{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006383 return SpaceToBatchNdSimpleTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006384}
6385
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006386LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
6387 armnn::IWorkloadFactory& workloadFactory,
6388 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006389{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006390 return SpaceToBatchNdMultiChannelsTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006391}
6392
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006393LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
6394 armnn::IWorkloadFactory& workloadFactory,
6395 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006396{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006397 return SpaceToBatchNdMultiBlockTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006398}
6399
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006400LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
6401 armnn::IWorkloadFactory& workloadFactory,
6402 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006403{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006404 return SpaceToBatchNdPaddingTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006405}
6406
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006407LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
6408 armnn::IWorkloadFactory& workloadFactory,
6409 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006410{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006411 return SpaceToBatchNdSimpleTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006412}
6413
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006414LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
6415 armnn::IWorkloadFactory& workloadFactory,
6416 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006417{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006418 return SpaceToBatchNdMultiChannelsTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006419}
6420
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006421LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
6422 armnn::IWorkloadFactory& workloadFactory,
6423 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006424{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006425 return SpaceToBatchNdMultiBlockTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006426}
6427
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006428LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
6429 armnn::IWorkloadFactory& workloadFactory,
6430 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006431{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006432 return SpaceToBatchNdPaddingTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006433}
6434
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006435LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
6436 armnn::IWorkloadFactory& workloadFactory,
6437 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006438{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006439 return SpaceToBatchNdSimpleNHWCTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006440}
6441
// Float32 instantiation of the multi-channel NHWC SpaceToBatchNd test
// (templated implementation lives in SpaceToBatchNdTestImpl.hpp).
LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<float>(workloadFactory, memoryManager);
}
6448
// Float32 instantiation of the multi-block NHWC SpaceToBatchNd test
// (templated implementation lives in SpaceToBatchNdTestImpl.hpp).
LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<float>(workloadFactory, memoryManager);
}
6455
// Float32 instantiation of the padded NHWC SpaceToBatchNd test
// (templated implementation lives in SpaceToBatchNdTestImpl.hpp).
LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<float>(workloadFactory, memoryManager);
}
6462
// Uint8 instantiation of the simple NHWC SpaceToBatchNd test
// (templated implementation lives in SpaceToBatchNdTestImpl.hpp).
LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<uint8_t>(workloadFactory, memoryManager);
}
6469
// Uint8 instantiation of the multi-channel NHWC SpaceToBatchNd test
// (templated implementation lives in SpaceToBatchNdTestImpl.hpp).
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<uint8_t>(workloadFactory, memoryManager);
}
6476
// Uint8 instantiation of the multi-block NHWC SpaceToBatchNd test
// (templated implementation lives in SpaceToBatchNdTestImpl.hpp).
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<uint8_t>(workloadFactory, memoryManager);
}
6483
// Uint8 instantiation of the padded NHWC SpaceToBatchNd test
// (templated implementation lives in SpaceToBatchNdTestImpl.hpp).
LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<uint8_t>(workloadFactory, memoryManager);
}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006490
namespace {

// Shared driver for the BatchToSpaceNd layer tests below.
//
// Builds input/output TensorInfos from the raw shape arrays, creates a
// BatchToSpaceNd workload through the supplied factory, executes it on
// 'inputData', and returns the actual output alongside the expected
// 'outputData' for the caller's comparison.
//
// T                    - element type; uint8_t selects QuantisedAsymm8,
//                        any other type selects Float32.
// InputDim / OutputDim - ranks of the input and output tensors.
// scale / offset       - quantization parameters applied to both the input
//                        and output TensorInfos (defaults: 1.0f / 0).
//
// NOTE(review): 'memoryManager' is accepted but never used in this body -
// presumably kept so the signature matches the other layer-test helpers;
// confirm before removing.
template<typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> BatchToSpaceNdHelper(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout& dataLayout,
    const unsigned int *inputShape,
    const std::vector<T> &inputData,
    const std::vector<unsigned int> &blockShape,
    const std::vector<std::pair<unsigned int, unsigned int>> &crops,
    const unsigned int *outputShape,
    const std::vector<T> &outputData,
    float scale = 1.0f,
    int32_t offset = 0)
{
    // Derive the armnn data type from the template parameter.
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    // The same scale/offset are applied to both input and output infos.
    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the BatchToSpaceNd operation and wire up the tensor handles.
    armnn::BatchToSpaceNdQueueDescriptor data;
    data.m_Parameters.m_DataLayout = dataLayout;
    data.m_Parameters.m_BlockShape = blockShape;
    data.m_Parameters.m_Crops = crops;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchToSpaceNd(data, info);

    // Allocation must happen before copying data in and executing.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workload->Execute();

    // NOTE(review): the [0][0][0][0] access assumes OutputDim == 4, which is
    // true for every caller in this file.
    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

} // anonymous namespace
6549
// BatchToSpaceNd, NHWC layout, float: block shape {2, 2} and no cropping
// rearrange four 2x2x1 batches into a single 1x4x4x1 tensor.
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 2, 2, 1};
    const unsigned int outputShape[] = {1, 4, 4, 1 };

    std::vector<float> input
    ({
        // Batch 0, Height 0, Width (2) x Channel (1)
        1.0f, 3.0f,
        // Batch 0, Height 1, Width (2) x Channel (1)
        9.0f, 11.0f,


        // Batch 1, Height 0, Width (2) x Channel (1)
        2.0f, 4.0f,
        // Batch 1, Height 1, Width (2) x Channel (1)
        10.0f, 12.0f,


        // Batch 2, Height 0, Width (2) x Channel (1)
        5.0f, 7.0f,
        // Batch 2, Height 1, Width (2) x Channel (1)
        13.0f, 15.0f,

        // Batch 3, Height 0, Width (2) x Channel (1)
        6.0f, 8.0f,
        // Batch 3, Height 1, Width (2) x Channel (1)
        14.0f, 16.0f
    });

    // The interleaved batches reassemble into one 4x4 plane in row order.
    std::vector<float> expectedOutput
    ({
        1.0f, 2.0f, 3.0f, 4.0f,
        5.0f, 6.0f, 7.0f, 8.0f,
        9.0f, 10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f, 16.0f
    });

    std::vector<unsigned int> blockShape {2, 2};
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
6597
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006598LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test2(
6599 armnn::IWorkloadFactory& workloadFactory,
6600 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006601{
6602 const unsigned int inputShape[] = {4, 1, 1, 1};
6603 const unsigned int outputShape[] = {1, 2, 2, 1};
6604
6605 std::vector<float> input
6606 ({
6607 // Batch 0, Height 0, Width (2) x Channel (1)
6608 1.0f, 2.0f, 3.0f, 4.0f
6609 });
6610
6611 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});
6612
6613 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00006614 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006615
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006616 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
6617 armnn::DataLayout::NHWC, inputShape, input, blockShape,
6618 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006619}
6620
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006621LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test3(
6622 armnn::IWorkloadFactory& workloadFactory,
6623 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006624{
6625 const unsigned int inputShape[] = {4, 1, 1, 3};
6626 const unsigned int outputShape[] = {1, 2, 2, 3};
6627
6628 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });
6629
6630 std::vector<float> expectedOutput({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });
6631
6632 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00006633 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006634
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006635 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
6636 armnn::DataLayout::NHWC, inputShape, input, blockShape,
6637 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006638}
6639
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006640LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test1(
6641 armnn::IWorkloadFactory &workloadFactory,
6642 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006643{
6644 const unsigned int inputShape[] = {4, 3, 1, 1};
6645 const unsigned int outputShape[] = {1, 3, 2, 2};
6646
6647 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });
6648
6649 std::vector<float> expectedOutput
6650 ({
6651 // Batch 0, Channel 0, Height (2) x Width (2)
6652 1.0f, 4.0f,
6653 7.0f, 10.0f,
6654
6655 // Batch 0, Channel 1, Height (2) x Width (2)
6656 2.0f, 5.0f,
6657 8.0f, 11.0f,
6658
6659 // Batch 0, Channel 2, Height (2) x Width (2)
6660 3.0f, 6.0f,
6661 9.0f, 12.0f,
6662 });
6663
6664 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00006665 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006666
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006667 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
6668 armnn::DataLayout::NCHW, inputShape, input, blockShape,
6669 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006670}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00006671
6672
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006673LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest1(
6674 armnn::IWorkloadFactory& workloadFactory,
6675 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00006676{
6677 const unsigned int inputShape[] = {4, 2, 2, 1};
6678 const unsigned int outputShape[] = {1, 4, 4, 1};
6679
6680 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 });
6681 std::vector<uint8_t> expectedOutput({ 1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16});
6682
6683 std::vector<unsigned int> blockShape({2, 2});
6684 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
6685
Matteo Martincigha65b7ae2018-11-14 12:39:55 +00006686 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager, armnn::DataLayout::NHWC, inputShape,
6687 input, blockShape, crops, outputShape, expectedOutput);
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006688}