//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "LayerTests.hpp"

#include "test/TensorHelpers.hpp"
#include "TensorCopyUtils.hpp"
#include "Permute.hpp"

#include <boost/test/unit_test.hpp>
#include <boost/assert.hpp>

#include <armnn/LayerSupport.hpp>

#include <backends/CpuTensorHandle.hpp>
#include <backends/WorkloadFactory.hpp>

#include <algorithm>
#include <boost/cast.hpp>

#include "WorkloadTestUtils.hpp"
#include "Conv2dTestImpl.hpp"
#include "BatchNormTestImpl.hpp"
#include "ActivationTestImpl.hpp"
#include "Pooling2dTestImpl.hpp"
#include "ReshapeTestImpl.hpp"
#include "FullyConnectedTestImpl.hpp"
#include "SplitterTestImpl.hpp"
#include "SoftmaxTestImpl.hpp"
#include "NormTestImpl.hpp"
#include "PermuteTestImpl.hpp"
#include "LstmTestImpl.hpp"
#include "ConvertFp16ToFp32TestImpl.hpp"
#include "ConvertFp32ToFp16TestImpl.hpp"

// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});

// 2-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias2({0, 2});

// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
template<typename T>
boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
{
    if (biasEnabled)
    {
        armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, armnn::GetDataType<T>());
        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
        return bias;
    }
    else
    {
        return boost::multi_array<T, 1>();
    }
}
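
// Note on the quantized variants used throughout this file: QuantizedVector is
// assumed to map each float value v to its quantized representation
// round(v / qScale) + qOffset (a no-op for float tensors), so with
// qScale = 0.5 and qOffset = 50 the Bias2 values {0, 2} become {50, 54}.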

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       float qScale,
                                                       int32_t qOffset,
                                                       bool biasEnabled,
                                                       const armnn::DataLayoutIndexed& layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x4 image (one output channel per kernel).
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout);
}
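
// Shape check for the test above: assuming stride 1 and no padding (which the
// call to SimpleConvolution2dTestImpl appears to default to when no padding
// arguments are passed), each output dimension is input - kernel + 1, so the
// 16x8 input and the 5-row by 3-column kernels give width 16 - 3 + 1 = 14 and
// height 8 - 5 + 1 = 4, matching the {1, 2, 4, 14} output tensor.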

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       float qScale,
                                                       int32_t qOffset,
                                                       bool biasEnabled,
                                                       const armnn::DataLayoutIndexed& layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout);
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                           float qScale,
                                                           int32_t qOffset,
                                                           bool biasEnabled,
                                                           armnn::DataLayout dataLayout)
{
    // Use a single-batch 1-channel 4x3 image (width 4, height 3) in NHWC layout.
    armnn::TensorInfo inputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
        {
            1, 5, 2, 3,
            8, 7, 3, 6,
            3, 3, 9, 1
        });

    // Use a single 1-channel 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
            4, 5, 6,
            0, 0, 0,
            3, 2, 1
        });

    // Expected output is 1 batch of a 1-channel 4x3 image, the same size as the input.
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());

    const std::vector<float> outputData =
        {
            23, 41, 33, 21,
            44, 65, 76, 52,
            82, 85, 79, 42
        };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    return SimpleConvolution2dNhwcTestImpl<T>(workloadFactory,
                                              input,
                                              kernel,
                                              boost::multi_array<T, 1>(),
                                              expectedOutput,
                                              dataLayout,
                                              qScale,
                                              qOffset);
}
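
// Worked check for the NHWC test above, assuming a stride-1 cross-correlation
// with one pixel of zero padding on every side (which is what the expected
// data implies): the top-left output element only overlaps the bottom kernel
// row, so out(0, 0) = 2 * in(1, 0) + 1 * in(1, 1) = 2*8 + 1*7 = 23, the first
// entry of outputData.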

LayerTestResult<float, 4> SimpleConvolution2d3x5Test(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled,
                                                     const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2d3x5TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, layout);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled,
                                                            const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2d3x5TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled, layout);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3Test(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled,
                                                     const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2d3x3TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, layout);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(armnn::IWorkloadFactory& workloadFactory,
                                                         bool biasEnabled)
{
    return SimpleConvolution2d3x3NhwcTestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled,
                                                            const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2d3x3TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled, layout);
}

template<typename T>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::DataLayoutIndexed& layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11, 21, 31,
            12, 22, 32,
            13, 23, 33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11, -21,
            -12, -22,
        })));

// Expected output is 1 batch of a 1-channel 6x8 image.
// Manually calculated like this:
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            0, 0, 0, 0, 0, 0,
            -242, -594, -934, -372, 0, 0,
            -495, -1190, -1850, -725, 0, 0,
            -538, -1256, -1916, -748, 0, 0,
            -273, -626, -946, -363, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout,
                                          1,  // Padding left.
                                          2,  // Padding top.
                                          3,  // Padding right.
                                          4); // Padding bottom.
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                     const armnn::DataLayoutIndexed& layout,
                                                                     float qScale,
                                                                     int32_t qOffset)
{
    // Use a single-batch 1-channel 5x5 image as input.
    armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11, 21, 31, 41, 51,
            12, 22, 32, 42, 52,
            13, 23, 33, 43, 53,
            14, 24, 34, 44, 54,
            15, 25, 35, 45, 55,
        })));

    // Use 1 batch of a 1-channel 4x4 kernel.
    armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11, -21, -31, -41,
            -12, -22, -32, -42,
            -13, -23, -33, -43,
            -14, -24, -34, -44,
        })));

    // Expected output is 1 batch of a 1-channel 5x5 image.
    armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
    std::vector<T> myVec(outputDesc.GetNumElements(), 0);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -7140, -10580, -13940, -9300, -5230,
            -9590, -14120, -18520, -12290, -6860,
            -9980, -14560, -18960, -12560, -7000,
            -7518, -10904, -14144, -9318, -5152,
            -5032, -7256, -9376, -6142, -3368,
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout,
                                          1,  // Padding left.
                                          1,  // Padding top.
                                          2,  // Padding right.
                                          2); // Padding bottom.
}
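
// Spot check for the asymmetric-padding test above (padding left 1, top 1,
// stride 1): the top-left output element overlaps only the lower-right 3x3
// corner of the 4x4 kernel, giving
//     out(0, 0) = -22*11 - 32*21 - 42*31 - 23*12 - 33*22 - 43*32
//                 - 24*13 - 34*23 - 44*33 = -7140,
// which matches the first expected value.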

template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                 float qScale,
                                                                 int32_t qOffset,
                                                                 bool biasEnabled,
                                                                 const armnn::DataLayoutIndexed& layout)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the python tensorflow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dAsymmetricTestImpl<T>(workloadFactory,
                                                       input,
                                                       kernel,
                                                       GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                                       expectedOutput,
                                                       qScale,
                                                       qOffset,
                                                       layout,
                                                       1,  // Padding left.
                                                       1,  // Padding top.
                                                       2,  // Padding right.
                                                       2,  // Padding bottom.
                                                       1,  // strideX
                                                       1); // strideY
}
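
// Hand check of the first reference value above (padding left 1, top 1,
// stride 1): for channel 0 the top-left output overlaps the lower-right 3x3
// of the 4x4 kernel, so
//     out(0, 0) = 27*0 + 26*1 + 25*2 + 23*5 + 22*6 + 21*7
//                 + 19*10 + 18*11 + 17*12 = 1062,
// agreeing with the TensorFlow-generated expected data.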

template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                           float qScale,
                                                           int32_t qOffset,
                                                           bool biasEnabled)
{
    // Use the same single-batch 2-channel 5x5 image as the asymmetric NCHW test above, rearranged to NHWC.
    armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
             0, 25,
             1, 26,
             2, 27,
             3, 28,
             4, 29,

             5, 30,
             6, 31,
             7, 32,
             8, 33,
             9, 34,

            10, 35,
            11, 36,
            12, 37,
            13, 38,
            14, 39,

            15, 40,
            16, 41,
            17, 42,
            18, 43,
            19, 44,

            20, 45,
            21, 46,
            22, 47,
            23, 48,
            24, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel, also in NHWC order.
    armnn::TensorInfo kernelTensorInfo({ 1, 4, 4, 2}, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 16,
            31, 15,
            30, 14,
            29, 13,

            28, 12,
            27, 11,
            26, 10,
            25,  9,

            24,  8,
            23,  7,
            22,  6,
            21,  5,

            20,  4,
            19,  3,
            18,  2,
            17,  1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image in NHWC layout.
    armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1550,
            1580, 2284,
            1850, 2362,
            1530, 1955,
            1117, 1428,

            2140, 2910,
            3108, 4206,
            3500, 4342,
            2842, 3528,
            2042, 2536,

            3580, 3390,
            5068, 4886,
            5460, 5022,
            4342, 4068,
            3062, 2916,

            3618, 3566,
            5072, 5056,
            5390, 5182,
            4248, 4133,
            2971, 2922,

            3074, 3100,
            4282, 4352,
            4510, 4452,
            3533, 3517,
            2457, 2465
        })));

    return DepthwiseConvolution2dNhwcTestImpl<T>(workloadFactory,
                                                 input,
                                                 kernel,
                                                 GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                                 expectedOutput,
                                                 qScale,
                                                 qOffset,
                                                 1,  // Padding left.
                                                 1,  // Padding top.
                                                 2,  // Padding right.
                                                 2,  // Padding bottom.
                                                 1,  // strideX
                                                 1); // strideY
}
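
// The NHWC tensors above are the NCHW tensors from
// DepthwiseConvolution2dAsymmetricTestCommon with the two channel planes
// interleaved element by element, so both tests describe the same computation
// expressed in different data layouts.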

LayerTestResult<float, 4>
Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(armnn::IWorkloadFactory& workloadFactory,
                                                           const armnn::DataLayoutIndexed& layout)
{
    return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon<float>(workloadFactory, layout, 0.0f, 0);
}

LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(armnn::IWorkloadFactory& workloadFactory,
                                                             const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2dAsymmetricPaddingTestCommon<float>(workloadFactory, layout, 0.0f, 0);
}

LayerTestResult<float, 4> DepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled,
                                                     const armnn::DataLayoutIndexed& layout)
{
    return DepthwiseConvolution2dTestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled, layout);
}

LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(armnn::IWorkloadFactory& workloadFactory,
                                                              bool biasEnabled)
{
    return DepthwiseConvolution2dNhwcTestCommon<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(armnn::IWorkloadFactory& workloadFactory,
                                                              bool biasEnabled)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(armnn::IWorkloadFactory& workloadFactory,
                                                               bool biasEnabled,
                                                               const armnn::DataLayoutIndexed& layout)
{
    return DepthwiseConvolution2dAsymmetricTestCommon<float>(workloadFactory, 0.0f, 0, biasEnabled, layout);
}

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled,
                                                            const armnn::DataLayoutIndexed& layout)
{
    return DepthwiseConvolution2dTestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled, layout);
}

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                                     bool biasEnabled)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

LayerTestResult<float, 4> Convolution1dTest(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
{
    return Convolution1dTestImpl<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> Convolution1dUint8Test(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
{
    return Convolution1dTestImpl<uint8_t>(workloadFactory, 0.1f, 128, biasEnabled);
}

LayerTestResult<float,4> CompareConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                  armnn::IWorkloadFactory& refWorkloadFactory)
{
    return CompareConvolution2dTestImpl<float>(workloadFactory, refWorkloadFactory);
}

template<typename T>
LayerTestResult<T,4> CompareDepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                       armnn::IWorkloadFactory& refWorkloadFactory,
                                                       const armnn::DataLayoutIndexed& layout)
{
    return CompareDepthwiseConvolution2dTestImpl<T>(workloadFactory, refWorkloadFactory, layout);
}

template LayerTestResult<float, 4> CompareDepthwiseConvolution2dTest<float>(
    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&, const armnn::DataLayoutIndexed&);
template LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dTest<uint8_t>(
    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&, const armnn::DataLayoutIndexed&);

LayerTestResult<float,4> SimpleNormalizationAcrossTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
}
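
// LocalBrightness is ArmNN's local response normalization. A sketch of the
// assumed computation (parameter names follow NormalizationDescriptor):
//     out = in / (kappa + alpha * sum(in_i^2))^beta
// where the sum runs over neighbouring channels for Across, or over a spatial
// window in the same channel for Within; see the reference backend for the
// authoritative definition.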

LayerTestResult<float,4> SimpleNormalizationWithinTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationNhwcTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float,2> SimpleSoftmaxTest(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    return SimpleSoftmaxTestImpl<float>(workloadFactory, beta);
}
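
// For reference, the softmax computed here is the beta-scaled form
//     softmax(x)_i = exp(beta * x_i) / sum_j exp(beta * x_j),
// so beta = 1 gives the standard softmax and larger beta values sharpen the
// resulting distribution.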

LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    return SimpleSoftmaxTestImpl<uint8_t>(workloadFactory, beta);
}

LayerTestResult<float,4> CompareNormalizationTest(armnn::IWorkloadFactory& workloadFactory,
                                                  armnn::IWorkloadFactory& refWorkloadFactory,
                                                  armnn::NormalizationAlgorithmChannel normChannel,
                                                  armnn::NormalizationAlgorithmMethod normMethod)
{
    return CompareNormalizationTestImpl(workloadFactory, refWorkloadFactory, normChannel, normMethod);
}

LayerTestResult<float,2> CompareSoftmaxTest(armnn::IWorkloadFactory& workloadFactory,
                                            armnn::IWorkloadFactory& refWorkloadFactory,
                                            float beta)
{
    return CompareSoftmaxTestImpl<float>(workloadFactory, refWorkloadFactory, beta);
}

LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                   armnn::IWorkloadFactory& refWorkloadFactory,
                                                   float beta)
{
    return CompareSoftmaxTestImpl<uint8_t>(workloadFactory, refWorkloadFactory, beta);
}

std::vector<LayerTestResult<float,3>> SplitterTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SplitterTestCommon<float>(workloadFactory);
}

std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SplitterTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<float, 3> CopyViaSplitterTest(armnn::IWorkloadFactory& workloadFactory)
{
    return CopyViaSplitterTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return CopyViaSplitterTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({ 2, 2 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        { 2., 3., 3., 4. }));

    armnn::TensorInfo outputDesc({ 2, 4 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
         -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
         0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
         -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
         -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
         0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
         -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
         0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
         0.02168f}));
    return LstmLayerFloat32NoCifgWithPeepholeWithProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({2, 2}, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        {2., 3., 3., 4.}));

    armnn::TensorInfo outputDesc({2, 4}, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
         -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float,3> MergerTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float,3> ret(outputTensorInfo);

    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,

            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,
        })
    );

    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
        {
            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; // Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<float,4> AdditionTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 2;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
        {
            0.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            1.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 1.0f, 2.0f,

            0.0f, 0.0f, 1.0f,
            0.2f, 1.0f, 2.0f,
        }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
        {
            1.0f, 2.0f, 1.0f,
            0.0f, 1.0f, 2.0f,

            1.0f, 2.0f, -2.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 0.0f, -3.0f,

            0.0f, 0.0f, 1.0f,
            0.7f, 1.0f, 5.0f,
        }));

    LayerTestResult<float,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 4.0f, 2.0f,
            0.2f, 2.0f, 4.0f,

            2.0f, 4.0f, -1.0f,
            0.4f, 2.0f, 4.0f,

            0.0f, 4.0f, 2.0f,
            8.4f, 1.0f, -1.0f,

            0.0f, 0.0f, 2.0f,
            0.9f, 2.0f, 7.0f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template <typename T>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                float qScale,
                                                int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
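
// Broadcasting note for the test above: shapes {1, 3, 2, 1} and {1, 1, 2, 3}
// are combined by repeating each size-1 dimension to match the other tensor,
// giving the {1, 3, 2, 3} output. For example,
//     output(0, 0, 0, 2) = input1(0, 0, 0, 0) + input2(0, 0, 0, 2)
//                        = 0.0f + 2.5f = 2.5f.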

template <typename T>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                        float qScale,
                                                        int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f, 1.0f, 2.0f,
            3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f,
            9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
            6.5f, 7.5f, 8.5f,
            9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<float, 4> AdditionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcastTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcastTestImpl<uint8_t>(workloadFactory, 2.f, 0);
}

LayerTestResult<float, 4> AdditionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcast1ElementTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcast1ElementTestImpl<uint8_t>(workloadFactory, 0.1333333f, 128);
}

LayerTestResult<float,4> CompareAdditionTest(armnn::IWorkloadFactory& workloadFactory,
                                             armnn::IWorkloadFactory& refWorkloadFactory)
{
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();
    refWorkloadFactory.Finalize();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
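
// CompareAdditionTest above follows the comparison pattern used by the
// Compare* tests in this file: the same workload is created on the backend
// under test and on a reference factory, both run on identical random inputs,
// and the reference result is stored in outputExpected so the caller can
// check the two outputs against each other.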

namespace {
template <typename T>
LayerTestResult<T, 4> DivisionTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                         const unsigned int shape0[4],
                                         const std::vector<T>& values0,
                                         float scale0,
                                         int32_t offset0,
                                         const unsigned int shape1[4],
                                         const std::vector<T>& values1,
                                         float scale1,
                                         int32_t offset1,
                                         const unsigned int outShape[4],
                                         const std::vector<T>& outValues,
                                         float outScale,
                                         int32_t outOffset)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DivisionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
1333
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001334LayerTestResult<float,4> DivisionByZeroTest(armnn::IWorkloadFactory& workloadFactory)
1335{
1336 const unsigned int width = 2;
1337 const unsigned int height = 2;
1338 const unsigned int channelCount = 2;
1339 const unsigned int batchSize = 2;
1340
1341 unsigned int shape[] = { batchSize, channelCount, height, width };
1342
1343 std::vector<float> input0({
1344 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
1345 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
1346
1347 std::vector<float> input1({
1348 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
1349 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
1350
1351 std::vector<float> output({
1352 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
1353 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
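// Per IEEE-754 float semantics: dividing a non-zero value by (+/-)0.0f yields
// an infinity whose sign is the product of the operands' signs, while
// 0.0f / (+/-)0.0f yields a NaN, which is exactly what the expected values encode.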
1354
David Beck5cd01f32018-09-12 16:00:08 +01001355 return DivisionTestHelper<float>(workloadFactory,
1356 shape, input0, 1.0f, 0,
1357 shape, input1, 1.0f, 0,
1358 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001359}
1360
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001361LayerTestResult<float,4> DivisionTest(armnn::IWorkloadFactory& workloadFactory)
1362{
1363 const unsigned int width = 2;
1364 const unsigned int height = 2;
1365 const unsigned int channelCount = 2;
1366 const unsigned int batchSize = 2;
1367
1368 unsigned int shape[] = { batchSize, channelCount, height, width };
1369
1370 std::vector<float> input0({
1371 2, 2, 2, 2, 3, 3, 3, 3,
1372 4, 4, 4, 4, 5, 5, 5, 5 });
1373
1374 std::vector<float> input1({
1375 1, 1, 1, 1, 2, 2, 2, 2,
1376 4, 4, 4, 4, 4, 4, 4, 4 });
1377
1378 std::vector<float> output({
1379 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
1380 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
1381
David Beck5cd01f32018-09-12 16:00:08 +01001382
1383 return DivisionTestHelper<float>(workloadFactory,
1384 shape, input0, 1.0f, 0,
1385 shape, input1, 1.0f, 0,
1386 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001387}
1388
1389LayerTestResult<float, 4> DivisionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
1390{
1391 unsigned int shape0[] = { 1, 2, 2, 2 };
1392 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1393
1394 unsigned int shape1[] = { 1, 1, 1, 1 };
1395 std::vector<float> input1({ 2 });
1396
1397 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1398
David Beck5cd01f32018-09-12 16:00:08 +01001399
1400 return DivisionTestHelper<float>(workloadFactory,
1401 shape0, input0, 1.0f, 0,
1402 shape1, input1, 1.0f, 0,
1403 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001404}
1405
1406LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
1407{
1408 unsigned int shape0[] = { 1, 3, 3, 2 };
1409 std::vector<float> input0({
1410 1, 4, 3, 8, 5, 12,
1411 7, 16, 9, 20, 11, 24,
1412 13, 28, 15, 32, 17, 36});
1413
1414 unsigned int shape1[] = { 1, 1, 1, 2 };
1415 std::vector<float> input1({ 1, 2 });
1416
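// input1's shape { 1, 1, 1, 2 } is broadcast against input0's { 1, 3, 3, 2 }:
// the pair { 1, 2 } repeats along every other dimension, so conceptually
// output[b][c][h][w] = input0[b][c][h][w] / input1[0][0][0][w].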
1417 std::vector<float> output({
1418 1, 2, 3, 4, 5, 6,
1419 7, 8, 9, 10, 11, 12,
1420 13, 14, 15, 16, 17, 18});
1421
David Beck5cd01f32018-09-12 16:00:08 +01001422 return DivisionTestHelper<float>(workloadFactory,
1423 shape0, input0, 1.0f, 0,
1424 shape1, input1, 1.0f, 0,
1425 shape0, output, 1.0f, 0);
1426}
1427
1428
1429LayerTestResult<uint8_t,4> DivisionUint8Test(armnn::IWorkloadFactory& workloadFactory)
1430{
1431 const unsigned int width = 2;
1432 const unsigned int height = 2;
1433 const unsigned int channelCount = 2;
1434 const unsigned int batchSize = 2;
1435
1436 unsigned int shape[] = { batchSize, channelCount, height, width };
1437
1438 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
1439 4, 4, 4, 4, 5, 5, 5, 5 });
1440
1441 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
1442 4, 4, 4, 4, 4, 4, 4, 4 });
1443
1444 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
1445 4, 4, 4, 4, 5, 5, 5, 5});
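// The helper call below uses an output quantisation scale of 0.25f, so each
// real result maps to quantised = real / 0.25f: e.g. 2 / 1 = 2.0f -> 8 and
// 3 / 2 = 1.5f -> 6.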
1446
1447
1448 return DivisionTestHelper<uint8_t>(workloadFactory,
1449 shape, input0, 1.0f, 0,
1450 shape, input1, 1.0f, 0,
1451 shape, output, 0.25f, 0);
1452}
1453
1454LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
1455{
1456 unsigned int shape0[] = { 1, 2, 2, 2 };
1457 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1458
1459 unsigned int shape1[] = { 1, 1, 1, 1 };
1460 std::vector<uint8_t> input1({ 2 });
1461
1462 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1463
1464 return DivisionTestHelper<uint8_t>(workloadFactory,
1465 shape0, input0, 1.0f, 0,
1466 shape1, input1, 1.0f, 0,
1467 shape0, output, 1.0f, 0);
1468}
1469
1470LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
1471{
1472 unsigned int shape0[] = { 1, 3, 3, 2 };
1473 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
1474 7, 16, 9, 20, 11, 24,
1475 13, 28, 15, 32, 17, 36});
1476
1477 unsigned int shape1[] = { 1, 1, 1, 2 };
1478 std::vector<uint8_t> input1({ 1, 2 });
1479
1480 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
1481 7, 8, 9, 10, 11, 12,
1482 13, 14, 15, 16, 17, 18});
1483
1484 return DivisionTestHelper<uint8_t>(workloadFactory,
1485 shape0, input0, 1.0f, 0,
1486 shape1, input1, 1.0f, 0,
1487 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001488}
1489
1490namespace {
surmeh01bceff2f2018-03-29 16:29:27 +01001491LayerTestResult<float,4> MultiplicationTestHelper(armnn::IWorkloadFactory& workloadFactory,
1492 const unsigned int shape0[4],
1493 const std::vector<float> & values0,
1494 const unsigned int shape1[4],
1495 const std::vector<float> & values1,
1496 const unsigned int outShape[4],
1497 const std::vector<float> & outValues)
telsoa014fcda012018-03-09 14:13:49 +00001498{
surmeh01bceff2f2018-03-29 16:29:27 +01001499 const size_t dimensionCount = 4;
1500 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
1501 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
1502 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
telsoa014fcda012018-03-09 14:13:49 +00001503
surmeh01bceff2f2018-03-29 16:29:27 +01001504 auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
1505 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00001506
1507 LayerTestResult<float,4> ret(outputTensorInfo);
1508
1509 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1510 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1511 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1512
1513 armnn::MultiplicationQueueDescriptor data;
1514 armnn::WorkloadInfo info;
1515 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1516 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1517 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1518
1519 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
1520
1521 inputHandle0->Allocate();
1522 inputHandle1->Allocate();
1523 outputHandle->Allocate();
1524
1525 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1526 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1527
surmeh013537c2c2018-05-18 16:31:43 +01001528 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001529 workload->Execute();
1530
1531 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1532
surmeh01bceff2f2018-03-29 16:29:27 +01001533 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00001534 return ret;
1535}
surmeh01bceff2f2018-03-29 16:29:27 +01001536} // anonymous namespace
1537
1538
1539LayerTestResult<float,4> MultiplicationTest(armnn::IWorkloadFactory& workloadFactory)
1540{
1541 const unsigned int width = 2;
1542 const unsigned int height = 2;
1543 const unsigned int channelCount = 2;
1544 const unsigned int batchSize = 2;
1545
1546 unsigned int shape[] = { batchSize, channelCount, height, width };
1547
1548 std::vector<float> input0({
1549 1, 1, 1, 1, 2, 2, 2, 2,
1550 3, 3, 3, 3, 4, 4, 4, 4 });
1551
1552 std::vector<float> input1({
1553 2, 2, 2, 2, 3, 3, 3, 3,
1554 4, 4, 4, 4, 5, 5, 5, 5 });
1555
1556 std::vector<float> output({
1557 2, 2, 2, 2, 6, 6, 6, 6,
1558 12, 12, 12, 12, 20, 20, 20, 20 });
1559
1560 return MultiplicationTestHelper(workloadFactory,
1561 shape,
1562 input0,
1563 shape,
1564 input1,
1565 shape,
1566 output);
1567}
1568
1569LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
1570{
1571 unsigned int shape0[] = { 1, 2, 2, 2 };
1572 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
1573
1574 unsigned int shape1[] = { 1, 1, 1, 1 };
1575 std::vector<float> input1({ 2 });
1576
1577 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
1578
1579 return MultiplicationTestHelper(workloadFactory,
1580 shape0,
1581 input0,
1582 shape1,
1583 input1,
1584 shape0,
1585 output);
1586}
1587
1588LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
1589{
1590 unsigned int shape0[] = { 1, 3, 3, 2 };
1591 std::vector<float> input0({
1592 1, 2, 3, 4, 5, 6,
1593 7, 8, 9, 10, 11, 12,
1594 13, 14, 15, 16, 17, 18});
1595
1596 unsigned int shape1[] = { 1, 1, 1, 2 };
1597 std::vector<float> input1({ 1, 2 });
1598
1599 std::vector<float> output({
1600 1, 4, 3, 8, 5, 12,
1601 7, 16, 9, 20, 11, 24,
1602 13, 28, 15, 32, 17, 36});
1603
1604 return MultiplicationTestHelper(workloadFactory,
1605 shape0,
1606 input0,
1607 shape1,
1608 input1,
1609 shape0,
1610 output);
1611}
telsoa014fcda012018-03-09 14:13:49 +00001612
1613LayerTestResult<float,4> CompareMultiplicationTest(armnn::IWorkloadFactory& workloadFactory,
1614 armnn::IWorkloadFactory& refWorkloadFactory)
1615{
1616 const unsigned int width = 16;
1617 const unsigned int height = 32;
1618 const unsigned int channelCount = 2;
1619 const unsigned int batchSize = 5;
1620
1621 armnn::TensorInfo inputTensorInfo0;
1622 armnn::TensorInfo inputTensorInfo1;
1623 armnn::TensorInfo outputTensorInfo;
1624
1625 constexpr unsigned int shape[] = { batchSize, channelCount, height, width };
1626
1627 inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1628 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1629 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1630
1631 LayerTestResult<float,4> comparisonResult(outputTensorInfo);
1632
1633 auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
1634 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);
1635
1636 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1637 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1638 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1639
1640 std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
1641 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
1642 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1643
1644 armnn::MultiplicationQueueDescriptor data;
1645 armnn::WorkloadInfo info;
1646 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1647 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1648 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1649
1650 armnn::MultiplicationQueueDescriptor refData = data;
1651 armnn::WorkloadInfo refInfo = info;
1652 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
1653 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
1654 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1655
1656 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
1657 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);
1658
1659 inputHandle0->Allocate();
1660 inputHandle1->Allocate();
1661 outputHandle->Allocate();
1662 inputHandle0Ref->Allocate();
1663 inputHandle1Ref->Allocate();
1664 outputHandleRef->Allocate();
1665
1666 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1667 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1668 CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
1669 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
1670
surmeh013537c2c2018-05-18 16:31:43 +01001671 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001672 workload->Execute();
surmeh013537c2c2018-05-18 16:31:43 +01001673 refWorkloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001674 workloadRef->Execute();
1675
1676 CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
1677 CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
1678
1679 return comparisonResult;
1680}
1681
1682LayerTestResult<float,4> CompareBatchNormTest(armnn::IWorkloadFactory& workloadFactory,
1683 armnn::IWorkloadFactory& refWorkloadFactory)
1684{
1685 const unsigned int width = 2;
1686 const unsigned int height = 3;
1687 const unsigned int channels = 5;
1688 const unsigned int batchSize = 3;
1689
1690 armnn::TensorInfo inputTensorInfo;
1691 armnn::TensorInfo outputTensorInfo;
1692 armnn::TensorInfo tensorInfo;
1693
1694 constexpr unsigned int shape[] = {batchSize, channels, height, width};
1695 constexpr unsigned int tensorShape[] = {channels};
1696
1697 inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1698 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1699 tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);
1700
1701 auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);
1702
1703 auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
1704 auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
1705 auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
1706 auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);
1707
1708 LayerTestResult<float,4> ret(outputTensorInfo);
1709
1710 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
1711 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1712
1713 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
1714 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1715
1716 armnn::BatchNormalizationQueueDescriptor data;
1717 armnn::WorkloadInfo info;
1718 armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
1719 armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
1720 armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
1721 armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
1722
1723 AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
1724 AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
1725 AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
1726 AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
1727
1728 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1729 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1730 data.m_Mean = &meanTensor;
1731 data.m_Variance = &varianceTensor;
1732 data.m_Beta = &betaTensor;
1733 data.m_Gamma = &gammaTensor;
1734 data.m_Parameters.m_Eps = 0.01f;
1735
1736 armnn::BatchNormalizationQueueDescriptor refData = data;
1737 armnn::WorkloadInfo refInfo = info;
1738 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
1739 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1740
1741 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
1742 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);
1743
1744 inputHandle->Allocate();
1745 outputHandle->Allocate();
1746 inputHandleRef->Allocate();
1747 outputHandleRef->Allocate();
1748
1749 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
1750 CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
1751
surmeh013537c2c2018-05-18 16:31:43 +01001752 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001753 workload->Execute();
surmeh013537c2c2018-05-18 16:31:43 +01001754 refWorkloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001755 workloadRef->Execute();
1756
1757 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1758 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
1759
1760 return ret;
1761}
1762
surmeh013537c2c2018-05-18 16:31:43 +01001763template<typename T>
1764void PermuteTensorData(
1765 armnn::IWorkloadFactory& workloadFactory,
1766 const armnn::PermutationVector& mappings,
1767 armnn::TensorInfo & inputTensorInfo,
1768 const T * inputData,
1769 std::vector<T>& outputData)
telsoa014fcda012018-03-09 14:13:49 +00001770{
surmeh013537c2c2018-05-18 16:31:43 +01001771 BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
1772 if (inputData == nullptr)
1773 {
1774 // Nullptr is an error in the test. By returning without doing the permutation
1775 // I expect the caller to fail the test. It still makes sense to report this as
1776 // an assert for Debug builds.
1777 return;
1778 }
telsoa014fcda012018-03-09 14:13:49 +00001779
surmeh013537c2c2018-05-18 16:31:43 +01001780 armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
1781
1782 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
1783 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1784
1785 armnn::PermuteQueueDescriptor queueDescriptor;
1786 queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
1787 armnn::WorkloadInfo workloadInfo;
1788 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
1789 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
1790
1791 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
1792
1793 inputHandle->Allocate();
1794 outputHandle->Allocate();
1795
1796 CopyDataToITensorHandle(inputHandle.get(), inputData);
1797
1798 workload->Execute();
1799
1800 outputData.resize(outputTensorInfo.GetNumElements());
1801 CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
1802 inputTensorInfo = outputTensorInfo;
1803}
1804
1805armnn::OriginsDescriptor CreateMergerDescriptorForConcatenation(
1806 const std::vector<armnn::TensorInfo> & inputTensorInfos,
1807 unsigned int concatDim)
1808{
telsoa014fcda012018-03-09 14:13:49 +00001809 std::vector<armnn::TensorShape> shapes;
1810 shapes.reserve(inputTensorInfos.size());
1811 for (const armnn::TensorInfo& it: inputTensorInfos)
1812 {
1813 shapes.push_back(it.GetShape());
1814 }
surmeh013537c2c2018-05-18 16:31:43 +01001815
1816 return armnn::CreateMergerDescriptorForConcatenation(shapes.begin(),
1817 shapes.end(),
1818 concatDim);
1819}
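// Illustrative sketch (not part of the tests): concatenating two { 2, 3, 2 }
// tensors along dimension 0 yields a descriptor with two views inside a
// { 4, 3, 2 } output, with origins { 0, 0, 0 } and { 2, 0, 0 }.
//
//     std::vector<armnn::TensorInfo> infos(
//         2, armnn::TensorInfo({ 2, 3, 2 }, armnn::DataType::Float32));
//     armnn::OriginsDescriptor desc = CreateMergerDescriptorForConcatenation(infos, 0);
//     // desc.GetNumViews() == 2 and desc.GetViewOrigin(1)[0] == 2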
1820
1821//
1822// Concatenation is only supported for N and C dimensions for NCHW. In case of
telsoa01c577f2c2018-08-31 09:22:23 +01001823 // <4 dimensions we need to make sure that the concat dimension is at least
surmeh013537c2c2018-05-18 16:31:43 +01001824 // the 3rd slowest iterating one.
1825//
1826
1827bool NeedPermuteForConcat(
1828 const std::vector<armnn::TensorInfo> & inputTensorInfos,
1829 unsigned int concatDim)
1830{
1831 // See note above. Additionally we expect the input shapes to have the
1832 // same number of dimensions.
1833 unsigned int nDimensions = 0;
1834
telsoa01c577f2c2018-08-31 09:22:23 +01001835 // Determine the number of dimensions and sanity-check them
1836 // against test implementation issues.
surmeh013537c2c2018-05-18 16:31:43 +01001837 for (auto && tensorInfo : inputTensorInfos)
1838 {
1839 if (!nDimensions)
1840 {
1841 nDimensions = tensorInfo.GetShape().GetNumDimensions();
1842 }
1843 else
1844 {
1845 BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
1846 "Input shapes must have the same number of dimensions");
1847 }
1848 }
1849
1850 return (nDimensions-concatDim) < 3;
1851}
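// Illustrative sketch (not part of the tests): with 3-d inputs, concatenating
// along dimension 0 gives (3 - 0) >= 3 and needs no permute, whereas
// dimension 2 gives (3 - 2) < 3 and does:
//
//     std::vector<armnn::TensorInfo> infos(
//         2, armnn::TensorInfo({ 2, 3, 2 }, armnn::DataType::Float32));
//     BOOST_ASSERT(!NeedPermuteForConcat(infos, 0));
//     BOOST_ASSERT(NeedPermuteForConcat(infos, 2));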
1852
1853armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
1854{
1855 unsigned int numDims = inputShape.GetNumDimensions();
1856 if (numDims >= 3)
1857 {
1858 // Nothing to do if the inputShape has at least 3 dimensions.
1859 return inputShape;
1860 }
1861
1862 std::vector<unsigned int> newDims(size_t(3), 1u);
1863 unsigned int expandedBy = 3 - numDims;
1864 for (unsigned int i=0; i<numDims; ++i)
1865 {
1866 newDims[expandedBy+i] = inputShape[i];
1867 }
1868 return armnn::TensorShape(3u, &newDims[0]);
1869}
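// For example, { 5 } expands to { 1, 1, 5 } and { 2, 3 } expands to { 1, 2, 3 };
// shapes that already have 3 or more dimensions are returned unchanged.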
1870
1871void Generate3dPermuteVectorForConcat(
1872 unsigned int numDimensions,
1873 unsigned int & concatDim,
1874 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
1875{
1876 BOOST_ASSERT_MSG(numDimensions <= 3,
1877 "Only dimensions 1,2 and 3 are supported by this helper");
1878
1879 unsigned int expandedBy = 3 - numDimensions;
1880 unsigned int expandedConcatAxis = concatDim + expandedBy;
1881
1882 if (expandedConcatAxis == 2)
1883 {
1884 concatDim = 0;
1885 armnn::PermutationVector forwardPermutation({1, 2, 0});
1886 armnn::PermutationVector reversePermutation({2, 0, 1});
1887 permutations = std::make_pair(forwardPermutation, reversePermutation);
1888 }
1889 else if (expandedConcatAxis == 1)
1890 {
1891 concatDim = 0;
1892 armnn::PermutationVector forwardPermutation({2, 0, 1});
1893 armnn::PermutationVector reversePermutation({1, 2, 0});
1894 permutations = std::make_pair(forwardPermutation, reversePermutation);
1895 }
1896 else
1897 {
1898 BOOST_ASSERT(expandedConcatAxis == 0);
1899 concatDim = 0;
1900 }
1901}
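// For example, a 2-d input concatenated along dimension 1 has its concat axis
// expand to 2 in 3-d; the forward permutation { 1, 2, 0 } then moves that axis
// into position 0 (where concatenation is supported) and the reverse
// permutation { 2, 0, 1 } undoes the move afterwards.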
1902
1903//
1904// Permute the input tensors so we can do a supported concatenation.
1905 // Tensors with fewer than 3 dimensions are treated as 3d by adding dummy
1906 // dimensions of size 1 at the front. Finally, this function reports what the
1907 // output shape of the permuted, concatenated tensor is going to be.
1908//
1909template <typename T>
1910void PermuteInputsForConcat(
1911 armnn::IWorkloadFactory& workloadFactory,
1912 std::vector<armnn::TensorInfo> & inputTensorInfos,
1913 std::vector<T *> & inputData,
1914 std::vector<std::vector<T>> & inputDataStorage,
1915 armnn::PermutationVector & permuteVector,
1916 unsigned int & concatDim,
1917 armnn::TensorInfo & outputTensorInfo)
1918{
1919 BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
1920 "Expecting more than one tensor to be concatenated here");
1921
1922 unsigned int numDims = 0;
1923 unsigned int nthInput = 0;
1924 const armnn::PermutationVector identity({0, 1, 2});
1925
1926 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
1927 std::make_pair(identity, identity);
1928
1929 inputDataStorage.resize(inputData.size());
1930
1931 for (auto && tensorInfo : inputTensorInfos)
1932 {
1933 if (numDims == 0)
1934 {
1935 numDims = tensorInfo.GetShape().GetNumDimensions();
1936 Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);
telsoa01c577f2c2018-08-31 09:22:23 +01001937 // Store the reverse permutation.
surmeh013537c2c2018-05-18 16:31:43 +01001938 permuteVector = permutations.second;
1939 BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
1940 "Test logic error, we don't need permutation, so we shouldn't arrive here");
1941 }
1942 else
1943 {
1944 BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
1945 "All inputs must have the same number of dimensions");
1946 }
1947
1948 armnn::TensorInfo newTensorInfo = tensorInfo;
1949 newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));
1950
1951 PermuteTensorData<T>(workloadFactory,
1952 permutations.first,
1953 newTensorInfo,
1954 inputData[nthInput],
1955 inputDataStorage[nthInput]);
1956
1957 inputData[nthInput] = inputDataStorage[nthInput].data();
1958 inputTensorInfos[nthInput] = newTensorInfo;
1959
1960 ++nthInput;
1961 }
1962
1963 outputTensorInfo.SetShape(
1964 armnnUtils::Permuted(
1965 ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
1966 permutations.first));
1967}
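// Note: permutations.first is applied to every input above, while the matching
// reverse mapping is handed back through permuteVector so that
// PermuteOutputForConcat (below) can restore the original layout afterwards.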
1968
1969
1970//
1971 // This is the counterpart of PermuteInputsForConcat(...), which permutes back
telsoa01c577f2c2018-08-31 09:22:23 +01001972// the output of the concatenation so we can check it against an expected
surmeh013537c2c2018-05-18 16:31:43 +01001973// output.
1974//
1975template <typename T>
1976void PermuteOutputForConcat(
1977 armnn::IWorkloadFactory& workloadFactory,
1978 const armnn::TensorInfo & tensorInfo,
1979 const armnn::PermutationVector & permuteVector,
1980 std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
1981 T * data)
1982{
1983 BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
1984 if (data == nullptr)
1985 {
1986 // Nullptr is an error in the test. By returning without doing the permutation
1987 // I expect the caller to fail the test. It still makes sense to report this as
1988 // an assert for Debug builds.
1989 return;
1990 }
1991
1992 armnn::TensorInfo resultTensorInfo = tensorInfo;
1993 std::vector<T> inputData(tensorInfo.GetNumElements());
1994 std::vector<T> outputData;
1995
1996 CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
1997
1998 PermuteTensorData<T>(workloadFactory,
1999 permuteVector,
2000 resultTensorInfo,
2001 &inputData[0],
2002 outputData);
2003
2004 ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
2005}
2006
2007template <typename T>
2008void Concatenate(armnn::IWorkloadFactory& workloadFactory,
2009 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
2010 std::initializer_list<T *> inputsOrig,
2011 const armnn::TensorInfo& outputTensorInfoOrig,
2012 T * output,
2013 unsigned int concatDim)
2014{
2015 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
2016 if (output == nullptr)
2017 {
2018 // Nullptr is an error in the test. By returning without doing the concatenation
2019 // I expect the caller to fail the test. It still makes sense to report this as
2020 // an assert for Debug builds.
2021 return;
2022 }
2023
2024 armnn::MergerQueueDescriptor queueDescriptor;
2025
telsoa01c577f2c2018-08-31 09:22:23 +01002026 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01002027 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
2028 std::vector<T *> inputs = inputsOrig;
2029 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
2030
2031 armnn::PermutationVector permuteVector{0, 1, 2};
2032
telsoa01c577f2c2018-08-31 09:22:23 +01002033 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01002034 std::vector<std::vector<T>> tmpInputDataStorage;
2035
2036 const size_t inputCount = inputTensorInfos.size();
2037
2038 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
2039
2040 if (needPermuteForConcat)
2041 {
2042 //
2043 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01002044 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01002045 //
2046 PermuteInputsForConcat<T>(workloadFactory,
2047 inputTensorInfos,
2048 inputs,
2049 tmpInputDataStorage,
2050 permuteVector,
2051 concatDim,
2052 outputTensorInfo);
2053 }
2054
2055 armnn::OriginsDescriptor viewsDescriptor = CreateMergerDescriptorForConcatenation(inputTensorInfos, concatDim);
telsoa014fcda012018-03-09 14:13:49 +00002056
2057 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
2058 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
2059 {
2060 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
2061 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
2062 }
2063
telsoa014fcda012018-03-09 14:13:49 +00002064 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2065
2066 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
2067 inputHandles.reserve(inputCount);
2068
2069 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2070 for (unsigned int i = 0; i < inputCount; ++i)
2071 {
surmeh013537c2c2018-05-18 16:31:43 +01002072 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
telsoa014fcda012018-03-09 14:13:49 +00002073
2074 std::unique_ptr<armnn::ITensorHandle> inputHandle = subTensorsSupported ?
2075 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo.GetShape(),
2076 queueDescriptor.m_ViewOrigins[i].m_Origin.data())
2077 : workloadFactory.CreateTensorHandle(inputTensorInfo);
2078
2079 inputHandles.emplace_back(std::move(inputHandle));
2080 }
2081
2082 armnn::WorkloadInfo workloadInfo;
2083
2084 for (unsigned int i = 0; i < inputCount; ++i)
2085 {
surmeh013537c2c2018-05-18 16:31:43 +01002086 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00002087 }
2088
2089 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
2090
2091 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(queueDescriptor, workloadInfo);
2092
2093 for (auto& inputHandle : inputHandles)
2094 {
2095 inputHandle->Allocate();
2096 }
2097
2098 outputHandle->Allocate();
2099
2100 unsigned int nextInputId = 0;
2101 for (auto& inputHandle : inputHandles)
2102 {
surmeh013537c2c2018-05-18 16:31:43 +01002103 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
2104 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00002105 }
2106
surmeh013537c2c2018-05-18 16:31:43 +01002107 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00002108 workload->Execute();
2109
surmeh013537c2c2018-05-18 16:31:43 +01002110 if (needPermuteForConcat)
2111 {
2112 PermuteOutputForConcat<T>(workloadFactory,
2113 outputTensorInfo,
2114 permuteVector,
2115 std::move(outputHandle),
2116 output);
2117 }
2118 else
2119 {
2120 CopyDataFromITensorHandle(output, outputHandle.get());
2121 }
telsoa014fcda012018-03-09 14:13:49 +00002122}
2123
2124template <typename T>
2125LayerTestResult<T, 1> Concatenation1dTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
2126{
2127 armnn::TensorInfo inputTensorInfo({ 3 }, armnn::GetDataType<T>());
2128
2129 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
2130 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
2131 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
2132
2133 armnn::TensorInfo outputTensorInfo({ 9 }, armnn::GetDataType<T>());
2134
2135 LayerTestResult<T, 1> result(outputTensorInfo);
2136
2137 std::vector<T> output;
2138 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002139 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002140 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2141 { input0.data(), input1.data(), input2.data() },
2142 outputTensorInfo,
2143 output.data(),
2144 0);
2145
2146 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
2147 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2148 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
2149 }));
2150
2151 return result;
2152}
2153
2154LayerTestResult<float, 1> Concatenation1dTest(armnn::IWorkloadFactory& workloadFactory)
2155{
2156 return Concatenation1dTestImpl<float>(workloadFactory, 0.0f, 0);
2157}
2158
2159template <typename T>
2160LayerTestResult<T, 2> Concatenation2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
2161 const armnn::TensorInfo& outputTensorInfo,
2162 unsigned int dimension,
2163 const float qScale,
2164 const int32_t qOffset)
2165{
2166 armnn::TensorInfo inputTensorInfo({ 2, 3 }, armnn::GetDataType<T>());
2167
2168 auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2169 // Batch 0
2170 1.0f, 2.0f, 3.0f,
2171
2172 // Batch 1
2173 10.0f, 11.0f, 12.0f,
2174 }));
2175
2176 auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2177 // Batch 0
2178 4.0f, 5.0f, 6.0f,
2179
2180 // Batch 1
2181 13.0f, 14.0f, 15.0f,
2182 }));
2183
2184 auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2185 // Batch 0
2186 7.0f, 8.0f, 9.0f,
2187
2188 // Batch 1
2189 16.0f, 17.0f, 18.0f,
2190 }));
2191
2192 LayerTestResult<T, 2> result(outputTensorInfo);
2193
2194 std::vector<T> output;
2195 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002196 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002197 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2198 { input0.data(), input1.data(), input2.data() },
2199 outputTensorInfo,
2200 output.data(),
2201 dimension);
2202
2203 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
2204 return result;
2205}
2206
2207template <typename T>
2208LayerTestResult<T, 2> Concatenation2dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory,
2209 float qScale, int32_t qOffset)
2210{
2211 armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
2212
2213 LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 0, qScale, qOffset);
2214 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2215 // Batch 0
2216 1.0f, 2.0f, 3.0f,
2217
2218 // Batch 1
2219 10.0f, 11.0f, 12.0f,
2220
2221 // Batch 2
2222 4.0f, 5.0f, 6.0f,
2223
2224 // Batch 3
2225 13.0f, 14.0f, 15.0f,
2226
2227 // Batch 4
2228 7.0f, 8.0f, 9.0f,
2229
2230 // Batch 5
2231 16.0f, 17.0f, 18.0f,
2232 }));
2233
2234 return result;
2235}
2236
2237LayerTestResult<float, 2> Concatenation2dDim0Test(armnn::IWorkloadFactory& workloadFactory)
2238{
2239 return Concatenation2dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
2240}
2241
2242template <typename T>
2243LayerTestResult<T, 2> Concatenation2dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
2244 float qScale, int32_t qOffset)
2245{
2246 armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
2247
2248 LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
2249 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2250 // Batch 0
2251 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
2252
2253 // Batch 1
2254 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
2255 }));
2256
2257 return result;
2258}
2259
2260LayerTestResult<float, 2> Concatenation2dDim1Test(armnn::IWorkloadFactory& workloadFactory)
2261{
2262 return Concatenation2dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
2263}
2264
2265template <typename T>
2266LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2267 int32_t qOffset)
2268{
2269 armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
2270 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2271 // Batch 0
2272 1.0f, 2.0f, 3.0f,
2273
2274 // Batch 1
2275 10.0f, 11.0f, 12.0f,
2276 }));
2277
2278 armnn::TensorInfo input1TensorInfo({ 3, 3 }, armnn::GetDataType<T>());
2279 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2280 // Batch 0
2281 4.0f, 5.0f, 6.0f,
2282
2283 // Batch 1
2284 13.0f, 14.0f, 15.0f,
2285
2286 // Batch 2
2287 7.0f, 8.0f, 9.0f,
2288 }));
2289
2290 armnn::TensorInfo input2TensorInfo({ 1, 3 }, armnn::GetDataType<T>());
2291 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2292 // Batch 0
2293 16.0f, 17.0f, 18.0f,
2294 }));
2295
2296 armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
2297 LayerTestResult<T, 2> result(outputTensorInfo);
2298
2299 std::vector<T> output;
2300 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002301 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002302 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2303 { input0.data(), input1.data(), input2.data() },
2304 outputTensorInfo,
2305 output.data(),
2306 0);
2307
2308 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
2309 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2310 // Batch 0
2311 1.0f, 2.0f, 3.0f,
2312
2313 // Batch 1
2314 10.0f, 11.0f, 12.0f,
2315
2316 // Batch 2
2317 4.0f, 5.0f, 6.0f,
2318
2319 // Batch 3
2320 13.0f, 14.0f, 15.0f,
2321
2322 // Batch 4
2323 7.0f, 8.0f, 9.0f,
2324
2325 // Batch 5
2326 16.0f, 17.0f, 18.0f,
2327 }));
2328
2329 return result;
2330}
2331
2332LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2333{
2334 return Concatenation2dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2335}
2336
2337template <typename T>
2338LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2339 int32_t qOffset)
2340{
2341 armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
2342 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2343 // Batch 0
2344 1.0f, 2.0f, 3.0f,
2345
2346 // Batch 1
2347 10.0f, 11.0f, 12.0f,
2348 }));
2349
2350 armnn::TensorInfo input1TensorInfo({ 2, 5 }, armnn::GetDataType<T>());
2351 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2352 // Batch 0
2353 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
2354
2355 // Batch 1
2356 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
2357 }));
2358
2359 armnn::TensorInfo input2TensorInfo({ 2, 1 }, armnn::GetDataType<T>());
2360 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2361 // Batch 0
2362 9.0f,
2363
2364 // Batch 1
2365 18.0f
2366 }));
2367
2368 armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
2369 LayerTestResult<T, 2> result(outputTensorInfo);
2370
2371 std::vector<T> output;
2372 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002373 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002374 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2375 { input0.data(), input1.data(), input2.data() },
2376 outputTensorInfo,
2377 output.data(),
2378 1);
2379
2380 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
2381 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2382 // Batch 0
2383 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
2384
2385 // Batch 1
2386 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
2387 }));
2388
2389 return result;
2390}
2391
2392LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2393{
2394 return Concatenation2dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2395}
2396
2397template <typename T>
2398LayerTestResult<T, 3> Concatenation3dTestImpl(armnn::IWorkloadFactory& workloadFactory,
2399 const armnn::TensorInfo& outputTensorInfo,
2400 unsigned int dimension,
2401 float qScale,
2402 int32_t qOffset)
2403{
2404 armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2405
2406 auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2407 // Batch 0, Channel 0
2408 1.0f, 2.0f,
2409
2410 // Batch 0, Channel 1
2411 3.0f, 4.0f,
2412
2413 // Batch 0, Channel 2
2414 5.0f, 6.0f,
2415
2416 // Batch 1, Channel 0
2417 19.0f, 20.0f,
2418
2419 // Batch 1, Channel 1
2420 21.0f, 22.0f,
2421
2422 // Batch 1, Channel 2
2423 23.0f, 24.0f
2424 }));
2425
2426 auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2427 // Batch 0, Channel 0
2428 7.0f, 8.0f,
2429
2430 // Batch 0, Channel 1
2431 9.0f, 10.0f,
2432
2433 // Batch 0, Channel 2
2434 11.0f, 12.0f,
2435
2436 // Batch 1, Channel 0
2437 25.0f, 26.0f,
2438
2439 // Batch 1, Channel 1
2440 27.0f, 28.0f,
2441
2442 // Batch 1, Channel 2
2443 29.0f, 30.0f
2444 }));
2445
2446 auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2447 // Batch 0, Channel 0
2448 13.0f, 14.0f,
2449
2450 // Batch 0, Channel 1
2451 15.0f, 16.0f,
2452
2453 // Batch 0, Channel 2
2454 17.0f, 18.0f,
2455
2456 // Batch 1, Channel 0
2457 31.0f, 32.0f,
2458
2459 // Batch 1, Channel 1
2460 33.0f, 34.0f,
2461
2462 // Batch 1, Channel 2
2463 35.0f, 36.0f
2464 }));
2465
2466 LayerTestResult<T, 3> result(outputTensorInfo);
2467
2468 std::vector<T> output;
2469 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002470 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002471 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2472 { input0.data(), input1.data(), input2.data() },
2473 outputTensorInfo,
2474 output.data(),
2475 dimension);
2476
2477 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
2478 return result;
2479}
2480
2481template <typename T>
2482LayerTestResult<T, 3> Concatenation3dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2483 int32_t qOffset)
2484{
2485 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
2486
2487 LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 0,
2488 qScale, qOffset);
2489 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2490 // Batch 0, Channel 0
2491 1.0f, 2.0f,
2492
2493 // Batch 0, Channel 1
2494 3.0f, 4.0f,
2495
2496 // Batch 0, Channel 2
2497 5.0f, 6.0f,
2498
2499 // Batch 1, Channel 0
2500 19.0f, 20.0f,
2501
2502 // Batch 1, Channel 1
2503 21.0f, 22.0f,
2504
2505 // Batch 1, Channel 2
2506 23.0f, 24.0f,
2507
2508 // Batch 2, Channel 0
2509 7.0f, 8.0f,
2510
2511 // Batch 2, Channel 1
2512 9.0f, 10.0f,
2513
2514 // Batch 2, Channel 2
2515 11.0f, 12.0f,
2516
2517 // Batch 3, Channel 0
2518 25.0f, 26.0f,
2519
2520 // Batch 3, Channel 1
2521 27.0f, 28.0f,
2522
2523 // Batch 3, Channel 2
2524 29.0f, 30.0f,
2525
2526 // Batch 4, Channel 0
2527 13.0f, 14.0f,
2528
2529 // Batch 4, Channel 1
2530 15.0f, 16.0f,
2531
2532 // Batch 4, Channel 2
2533 17.0f, 18.0f,
2534
2535 // Batch 5, Channel 0
2536 31.0f, 32.0f,
2537
2538 // Batch 5, Channel 1
2539 33.0f, 34.0f,
2540
2541 // Batch 5, Channel 2
2542 35.0f, 36.0f
2543 }));
2544 return result;
2545}
2546
2547LayerTestResult<float, 3> Concatenation3dDim0Test(armnn::IWorkloadFactory& workloadFactory)
2548{
2549 return Concatenation3dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
2550}
2551
2552template <typename T>
2553LayerTestResult<T, 3> Concatenation3dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
2554 float qScale, int32_t qOffset)
2555{
2556 armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, armnn::GetDataType<T>());
2557
2558 LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
2559 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2560 // Batch 0, Channel 0
2561 1.0f, 2.0f,
2562
2563 // Batch 0, Channel 1
2564 3.0f, 4.0f,
2565
2566 // Batch 0, Channel 2
2567 5.0f, 6.0f,
2568
2569 // Batch 0, Channel 3
2570 7.0f, 8.0f,
2571
2572 // Batch 0, Channel 4
2573 9.0f, 10.0f,
2574
2575 // Batch 0, Channel 5
2576 11.0f, 12.0f,
2577
2578 // Batch 0, Channel 6
2579 13.0f, 14.0f,
2580
2581 // Batch 0, Channel 7
2582 15.0f, 16.0f,
2583
2584 // Batch 0, Channel 8
2585 17.0f, 18.0f,
2586
2587 // Batch 1, Channel 0
2588 19.0f, 20.0f,
2589
2590 // Batch 1, Channel 1
2591 21.0f, 22.0f,
2592
2593 // Batch 1, Channel 2
2594 23.0f, 24.0f,
2595
2596 // Batch 1, Channel 3
2597 25.0f, 26.0f,
2598
2599 // Batch 1, Channel 4
2600 27.0f, 28.0f,
2601
2602 // Batch 1, Channel 5
2603 29.0f, 30.0f,
2604
2605 // Batch 1, Channel 6
2606 31.0f, 32.0f,
2607
2608 // Batch 1, Channel 7
2609 33.0f, 34.0f,
2610
2611 // Batch 1, Channel 8
2612 35.0f, 36.0f
2613 }));
2614
2615 return result;
2616}
2617
2618LayerTestResult<float, 3> Concatenation3dDim1Test(armnn::IWorkloadFactory& workloadFactory)
2619{
2620 return Concatenation3dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
2621}
2622
2623template <typename T>
2624LayerTestResult<T, 3> Concatenation3dDim2TestImpl(armnn::IWorkloadFactory& workloadFactory,
2625 float qScale, int32_t qOffset)
2626{
2627 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
2628
2629 LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 2, qScale, qOffset);
2630 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2631 // Batch 0, Channel 0
2632 1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
2633
2634 // Batch 0, Channel 1
2635 3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
2636
2637 // Batch 0, Channel 2
2638 5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
2639
2640 // Batch 1, Channel 0
2641 19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
2642
2643 // Batch 1, Channel 1
2644 21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
2645
2646 // Batch 1, Channel 2
2647 23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
2648 }));
2649
2650 return result;
2651}
2652
2653LayerTestResult<float, 3> Concatenation3dDim2Test(armnn::IWorkloadFactory& workloadFactory)
2654{
2655 return Concatenation3dDim2TestImpl<float>(workloadFactory, 0.0f, 0);
2656}
2657
2658template <typename T>
2659LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2660 int32_t qOffset)
2661{
2662 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2663 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2664 // Batch 0, Channel 0
2665 1.0f, 2.0f,
2666
2667 // Batch 0, Channel 1
2668 3.0f, 4.0f,
2669
2670 // Batch 0, Channel 2
2671 5.0f, 6.0f,
2672
2673 // Batch 1, Channel 0
2674 19.0f, 20.0f,
2675
2676 // Batch 1, Channel 1
2677 21.0f, 22.0f,
2678
2679 // Batch 1, Channel 2
2680 23.0f, 24.0f
2681 }));
2682
2683 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, armnn::GetDataType<T>());
2684 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2685 // Batch 0, Channel 0
2686 7.0f, 8.0f,
2687
2688 // Batch 0, Channel 1
2689 9.0f, 10.0f,
2690
2691 // Batch 0, Channel 2
2692 11.0f, 12.0f,
2693 }));
2694
2695 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, armnn::GetDataType<T>());
2696 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2697 // Batch 0, Channel 0
2698 25.0f, 26.0f,
2699
2700 // Batch 0, Channel 1
2701 27.0f, 28.0f,
2702
2703 // Batch 0, Channel 2
2704 29.0f, 30.0f,
2705
2706 // Batch 1, Channel 0
2707 13.0f, 14.0f,
2708
2709 // Batch 1, Channel 1
2710 15.0f, 16.0f,
2711
2712 // Batch 1, Channel 2
2713 17.0f, 18.0f,
2714
2715 // Batch 2, Channel 0
2716 31.0f, 32.0f,
2717
2718 // Batch 2, Channel 1
2719 33.0f, 34.0f,
2720
2721 // Batch 2, Channel 2
2722 35.0f, 36.0f
2723 }));
2724
2725 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
2726 LayerTestResult<T, 3> result(outputTensorInfo);
2727
2728 std::vector<T> output;
2729 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002730 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002731 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2732 { input0.data(), input1.data(), input2.data() },
2733 outputTensorInfo,
2734 output.data(),
2735 0);
2736
2737 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
2738 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2739 // Batch 0, Channel 0
2740 1.0f, 2.0f,
2741
2742 // Batch 0, Channel 1
2743 3.0f, 4.0f,
2744
2745 // Batch 0, Channel 2
2746 5.0f, 6.0f,
2747
2748 // Batch 1, Channel 0
2749 19.0f, 20.0f,
2750
2751 // Batch 1, Channel 1
2752 21.0f, 22.0f,
2753
2754 // Batch 1, Channel 2
2755 23.0f, 24.0f,
2756
2757 // Batch 2, Channel 0
2758 7.0f, 8.0f,
2759
2760 // Batch 2, Channel 1
2761 9.0f, 10.0f,
2762
2763 // Batch 2, Channel 2
2764 11.0f, 12.0f,
2765
2766 // Batch 3, Channel 0
2767 25.0f, 26.0f,
2768
2769 // Batch 3, Channel 1
2770 27.0f, 28.0f,
2771
2772 // Batch 3, Channel 2
2773 29.0f, 30.0f,
2774
2775 // Batch 4, Channel 0
2776 13.0f, 14.0f,
2777
2778 // Batch 4, Channel 1
2779 15.0f, 16.0f,
2780
2781 // Batch 4, Channel 2
2782 17.0f, 18.0f,
2783
2784 // Batch 5, Channel 0
2785 31.0f, 32.0f,
2786
2787 // Batch 5, Channel 1
2788 33.0f, 34.0f,
2789
2790 // Batch 5, Channel 2
2791 35.0f, 36.0f
2792 }));
2793
2794 return result;
2795}
2796
2797LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2798{
2799 return Concatenation3dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2800}
2801
2802template <typename T>
2803LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2804 int32_t qOffset)
2805{
2806 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2807 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2808 // Batch 0, Channel 0
2809 1.0f, 2.0f,
2810
2811 // Batch 0, Channel 1
2812 3.0f, 4.0f,
2813
2814 // Batch 0, Channel 2
2815 5.0f, 6.0f,
2816
2817 // Batch 1, Channel 0
2818 19.0f, 20.0f,
2819
2820 // Batch 1, Channel 1
2821 21.0f, 22.0f,
2822
2823 // Batch 1, Channel 2
2824 23.0f, 24.0f
2825 }));
2826
2827 armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, armnn::GetDataType<T>());
2828 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2829 // Batch 0, Channel 0
2830 7.0f, 8.0f,
2831
2832 // Batch 0, Channel 1
2833 9.0f, 10.0f,
2834
2835 // Batch 0, Channel 2
2836 11.0f, 12.0f,
2837
2838 // Batch 0, Channel 3
2839 25.0f, 26.0f,
2840
2841 // Batch 1, Channel 0
2842 27.0f, 28.0f,
2843
2844 // Batch 1, Channel 1
2845 29.0f, 30.0f,
2846
2847 // Batch 1, Channel 2
2848 13.0f, 14.0f,
2849
2850 // Batch 1, Channel 3
2851 15.0f, 16.0f,
2852 }));
2853
2854 armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, armnn::GetDataType<T>());
2855 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2856 // Batch 0, Channel 0
2857 17.0f, 18.0f,
2858
2859 // Batch 1, Channel 0
2860 31.0f, 32.0f,
2861 }));
2862
2863 armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, armnn::GetDataType<T>());
2864 LayerTestResult<T, 3> result(outputTensorInfo);
2865
2866 std::vector<T> output;
2867 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002868 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002869 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2870 { input0.data(), input1.data(), input2.data() },
2871 outputTensorInfo,
2872 output.data(),
2873 1);
2874
2875 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
2876 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2877 // Batch 0, Channel 0
2878 1.0f, 2.0f,
2879
2880 // Batch 0, Channel 1
2881 3.0f, 4.0f,
2882
2883 // Batch 0, Channel 2
2884 5.0f, 6.0f,
2885
2886 // Batch 0, Channel 3
2887 7.0f, 8.0f,
2888
2889 // Batch 0, Channel 4
2890 9.0f, 10.0f,
2891
2892 // Batch 0, Channel 5
2893 11.0f, 12.0f,
2894
2895 // Batch 0, Channel 6
2896 25.0f, 26.0f,
2897
2898 // Batch 0, Channel 7
2899 17.0f, 18.0f,
2900
2901 // Batch 1, Channel 0
2902 19.0f, 20.0f,
2903
2904 // Batch 1, Channel 1
2905 21.0f, 22.0f,
2906
2907 // Batch 1, Channel 2
2908 23.0f, 24.0f,
2909
2910 // Batch 1, Channel 3
2911 27.0f, 28.0f,
2912
2913 // Batch 1, Channel 4
2914 29.0f, 30.0f,
2915
2916 // Batch 1, Channel 5
2917 13.0f, 14.0f,
2918
2919 // Batch 1, Channel 6
2920 15.0f, 16.0f,
2921
2922 // Batch 1, Channel 7
2923 31.0f, 32.0f,
2924 }));
2925
2926 return result;
2927}
2928
2929LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2930{
2931 return Concatenation3dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2932}
2933
2934template <typename T>
2935LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2936 int32_t qOffset)
2937{
2938 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2939 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2940 // Batch 0, Channel 0
2941 1.0f, 2.0f,
2942
2943 // Batch 0, Channel 1
2944 3.0f, 4.0f,
2945
2946 // Batch 0, Channel 2
2947 5.0f, 6.0f,
2948
2949 // Batch 1, Channel 0
2950 19.0f, 20.0f,
2951
2952 // Batch 1, Channel 1
2953 21.0f, 22.0f,
2954
2955 // Batch 1, Channel 2
2956 23.0f, 24.0f
2957 }));
2958
2959 armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, armnn::GetDataType<T>());
2960 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2961 // Batch 0, Channel 0
2962 7.0f,
2963
2964 // Batch 0, Channel 1
2965 9.0f,
2966
2967 // Batch 0, Channel 2
2968 11.0f,
2969
2970 // Batch 1, Channel 0
2971 25.0f,
2972
2973 // Batch 1, Channel 1
2974 27.0f,
2975
2976 // Batch 1, Channel 2
2977 29.0f
2978 }));
2979
2980 armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, armnn::GetDataType<T>());
2981 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2982 // Batch 0, Channel 0
2983 13.0f, 14.0f, 50.0f,
2984
2985 // Batch 0, Channel 1
2986 15.0f, 16.0f, 51.0f,
2987
2988 // Batch 0, Channel 2
2989 17.0f, 18.0f, 52.0f,
2990
2991 // Batch 1, Channel 0
2992 31.0f, 32.0f, 53.0f,
2993
2994 // Batch 1, Channel 1
2995 33.0f, 34.0f, 54.0f,
2996
2997 // Batch 1, Channel 2
2998 35.0f, 36.0f, 55.0f,
2999 }));
3000
3001 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
3002 LayerTestResult<T, 3> result(outputTensorInfo);
3003
3004 std::vector<T> output;
3005 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01003006 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00003007 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
3008 { input0.data(), input1.data(), input2.data() },
3009 outputTensorInfo,
3010 output.data(),
3011 2);
3012
3013 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
3014 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3015 // Batch 0, Channel 0
3016 1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
3017
3018 // Batch 0, Channel 1
3019 3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
3020
3021 // Batch 0, Channel 2
3022 5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
3023
3024 // Batch 1, Channel 0
3025 19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
3026
3027 // Batch 1, Channel 1
3028 21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
3029
3030 // Batch 1, Channel 2
3031 23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
3032 }));
3033
3034 return result;
3035}
3036
3037LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
3038{
3039 return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
3040}
3041
James Conroy074f3712018-10-03 09:32:03 +01003042LayerTestResult<float, 4> ResizeBilinearNopTestImpl(armnn::IWorkloadFactory& workloadFactory,
3043 const armnn::TensorShape& inputOutputTensorShape,
3044 armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00003045{
James Conroy074f3712018-10-03 09:32:03 +01003046 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
3047 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00003048
3049 auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
3050 1.0f, 2.0f, 3.0f, 4.0f,
3051 2.0f, 3.0f, 4.0f, 5.0f,
3052 3.0f, 4.0f, 5.0f, 6.0f,
3053 4.0f, 5.0f, 6.0f, 7.0f
3054 }));
3055
    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory)
{
    // BatchSize = 1, Channels = 1, Height = 4, Width = 4
    const armnn::TensorShape inputOutputShape{ 1, 1, 4, 4 };

    return ResizeBilinearNopTestImpl(workloadFactory, inputOutputShape, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> ResizeBilinearNopNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // BatchSize = 1, Height = 4, Width = 4, Channels = 1
    const armnn::TensorShape inputOutputShape{ 1, 4, 4, 1 };

    return ResizeBilinearNopTestImpl(workloadFactory, inputOutputShape, armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> SimpleResizeBilinearTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                       const armnn::TensorShape& inputTensorShape,
                                                       const armnn::TensorShape& outputTensorShape,
                                                       armnn::DataLayout dataLayout)
{
    const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
          1.0f, 255.0f,
        200.0f, 250.0f
    }));

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if
    // projecting the centre).
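    // Concretely: srcX = dstX * (inputWidth / outputWidth) = 0 * 2 = 0 (and likewise for Y), so the
    // single output texel samples input (0,0) with weight 1 and no interpolation takes place.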
    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
        1.0f
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 2
    const armnn::TensorShape inputShape{ 1, 1, 2, 2 };

    // outputShape: BatchSize = 1, Channels = 1, Height = 1, Width = 1
    const armnn::TensorShape outputShape{ 1, 1, 1, 1 };

    return SimpleResizeBilinearTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> SimpleResizeBilinearNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Height = 2, Width = 2, Channels = 1
    const armnn::TensorShape inputShape{ 1, 2, 2, 1 };

    // outputShape: BatchSize = 1, Height = 1, Width = 1, Channels = 1
    const armnn::TensorShape outputShape{ 1, 1, 1, 1 };

    return SimpleResizeBilinearTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> ResizeBilinearSqMinTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                      const armnn::TensorShape& inputTensorShape,
                                                      const armnn::TensorShape& outputTensorShape,
                                                      armnn::DataLayout dataLayout)
{
    const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        1.0f, 2.0f, 3.0f, 4.0f,
        2.0f, 3.0f, 4.0f, 5.0f,
        3.0f, 4.0f, 5.0f, 6.0f,
        4.0f, 5.0f, 6.0f, 7.0f
    }));

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
        1.0f, 3.0f,
        3.0f, 5.0f
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Channels = 1, Height = 4, Width = 4
    const armnn::TensorShape inputShape{ 1, 1, 4, 4 };

    // outputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 2
    const armnn::TensorShape outputShape{ 1, 1, 2, 2 };

    return ResizeBilinearSqMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> ResizeBilinearSqMinNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Height = 4, Width = 4, Channels = 1
    const armnn::TensorShape inputShape{ 1, 4, 4, 1 };

    // outputShape: BatchSize = 1, Height = 2, Width = 2, Channels = 1
    const armnn::TensorShape outputShape{ 1, 2, 2, 1 };

    return ResizeBilinearSqMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> ResizeBilinearMinTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                    const armnn::TensorShape& inputTensorShape,
                                                    const armnn::TensorShape& outputTensorShape,
                                                    armnn::DataLayout dataLayout)
{
    const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
          1.0f,   2.0f,   3.0f,   5.0f,   8.0f,
         13.0f,  21.0f,  34.0f,  55.0f,  89.0f,
        144.0f, 233.0f, 377.0f, 610.0f, 987.0f
    }));

    LayerTestResult<float, 4> result(outputTensorInfo);
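    // The scale factors are inputDim / outputDim: 3/2 vertically and 5/3 horizontally. For example,
    // output (0,1) projects to input x = 1 * 5/3 = 1.667, which interpolates between 2.0f and 3.0f:
    // 2.0 + 0.667 * (3.0 - 2.0) = 2.6666f. Similarly, output (1,0) projects to y = 1.5, halfway
    // between 13.0f and 144.0f, giving 78.5f.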
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
         1.0f,   2.6666f,   6.0f,
        78.5f, 179.3333f, 401.0f
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 5
    const armnn::TensorShape inputShape{ 1, 1, 3, 5 };

    // outputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 3
    const armnn::TensorShape outputShape{ 1, 1, 2, 3 };

    return ResizeBilinearMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> ResizeBilinearMinNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Height = 3, Width = 5, Channels = 1
    const armnn::TensorShape inputShape{ 1, 3, 5, 1 };

    // outputShape: BatchSize = 1, Height = 2, Width = 3, Channels = 1
    const armnn::TensorShape outputShape{ 1, 2, 3, 1 };

    return ResizeBilinearMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> ResizeBilinearMagTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                    const armnn::TensorShape& inputTensorShape,
                                                    const armnn::TensorShape& outputTensorShape,
                                                    armnn::DataLayout dataLayout)
{
    const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
          1.0f,   2.0f,
         13.0f,  21.0f,
        144.0f, 233.0f
    }));

    LayerTestResult<float, 4> result(outputTensorInfo);
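    // The horizontal scale factor is inputWidth / outputWidth = 2/5, so the output columns project
    // to input x = 0.0, 0.4, 0.8, 1.2 and 1.6. In row 0, x = 0.4 gives 1.0 + 0.4 * (2.0 - 1.0) = 1.4f;
    // past the last texel (x >= 1.0) the edge value is clamped, hence 2.0f in the final two columns.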
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
          1.0f,   1.4f,   1.8f,   2.0f,   2.0f,
         13.0f,  16.2f,  19.4f,  21.0f,  21.0f,
        144.0f, 179.6f, 215.2f, 233.0f, 233.0f
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 2
    const armnn::TensorShape inputShape{ 1, 1, 3, 2 };

    // outputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 5
    const armnn::TensorShape outputShape{ 1, 1, 3, 5 };

    return ResizeBilinearMagTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> ResizeBilinearMagNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Height = 3, Width = 2, Channels = 1
    const armnn::TensorShape inputShape{ 1, 3, 2, 1 };

    // outputShape: BatchSize = 1, Height = 3, Width = 5, Channels = 1
    const armnn::TensorShape outputShape{ 1, 3, 5, 1 };

    return ResizeBilinearMagTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
}

LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int width = 2;
    constexpr unsigned int height = 3;

    const armnn::TensorInfo tensorInfo({ height, width }, armnn::DataType::Float32);
    auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
        -10.0f, -5.0f,
          0.0f,  5.0f,
         10.0f, 10.0f
    }));

    LayerTestResult<float, 2> ret(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);

    armnn::FakeQuantizationQueueDescriptor data;
    armnn::WorkloadInfo info;

    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());

    float min = -10.f;
    float max = 10.f;

    data.m_Parameters.m_Min = min;
    data.m_Parameters.m_Max = max;

    armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
    armnn::FakeQuantizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

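    // Fake quantization maps the [min, max] = [-10, 10] range linearly onto [0, 255] (12.75 steps
    // per unit here), rounding to the nearest representable level; the exact off-by-one behaviour
    // between adjacent levels depends on how the backend rounds the value and the quantization offset.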
    ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
          0.0f,  63.0f,
        128.0f, 191.0f,
        255.0f, 255.0f
    }));
    return ret;
}

namespace
{

LayerTestResult<float, 4> L2NormalizationTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                  const armnn::TensorShape& inputOutputTensorShape,
                                                  const std::vector<float>& inputValues,
                                                  const std::vector<float>& expectedOutputValues,
                                                  armnn::DataLayout dataLayout)
{
    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);

    auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputValues));

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

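// Returns 1 / sqrt(sum_i(x_i^2)), the reciprocal of the Euclidean norm, so expected outputs can be
// written as inputValue * CalcInvL2Norm({ values sharing that position across the channel axis }).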
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
                                            [](float acc, float element) { return acc + element * element; });
    return 1.0f / sqrtf(reduction);
}

} // anonymous namespace

template <typename T>
LayerTestResult<T, 2> Pad2dTestCommon(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
{
    const armnn::TensorShape inputShape{ 3, 3 };
    const armnn::TensorShape outputShape{ 7, 7 };

    const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
    const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());

    std::vector<T> inputValues(
        QuantizedVector<T>(qScale, qOffset,
        {
            // Height (3) x Width (3)
            4, 8, 6,
            7, 4, 4,
            3, 2, 4
        }));

    std::vector<T> expectedOutputValues(
        QuantizedVector<T>(qScale, qOffset,
        {
            0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0,
            0, 0, 4, 8, 6, 0, 0,
            0, 0, 7, 4, 4, 0, 0,
            0, 0, 3, 2, 4, 0, 0,
            0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0
        }));

    auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 2> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

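    // The pad descriptor takes one (padBefore, padAfter) pair per input dimension; two zeros on
    // each side of both axes grow the 3x3 input to the 7x7 output.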
    std::vector<std::pair<unsigned int, unsigned int>> PadList;
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));

    descriptor.m_Parameters.m_PadList = PadList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}

template <typename T>
LayerTestResult<T, 3> Pad3dTestCommon(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
{
    const armnn::TensorShape inputShape{ 2, 2, 2 };
    const armnn::TensorShape outputShape{ 3, 5, 6 };

    const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
    const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());

    std::vector<T> inputValues(
        QuantizedVector<T>(qScale, qOffset,
        {
            // Channel 0, Height (2) x Width (2)
            0, 4,
            2, 5,

            // Channel 1, Height (2) x Width (2)
            6, 1,
            5, 2
        }));

    std::vector<T> expectedOutputValues(
        QuantizedVector<T>(qScale, qOffset,
        {
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 4, 0, 0,
            0, 0, 2, 5, 0, 0,
            0, 0, 0, 0, 0, 0,

            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 6, 1, 0, 0,
            0, 0, 5, 2, 0, 0,
            0, 0, 0, 0, 0, 0,

            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        }));

    auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 3> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

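    // (padBefore, padAfter) per dimension: channels 2 -> 3, height 2 -> 5, width 2 -> 6.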
    std::vector<std::pair<unsigned int, unsigned int>> PadList;
    PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));

    descriptor.m_Parameters.m_PadList = PadList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());

    return result;
}

template <typename T>
LayerTestResult<T, 4> Pad4dTestCommon(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
{
    const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
    const armnn::TensorShape outputShape{ 4, 5, 7, 4 };

    const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
    const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());

    std::vector<T> inputValues(
        QuantizedVector<T>(qScale, qOffset,
        {
            // Batch 0, Channel 0, Height (3) x Width (2)
            0, 1,
            2, 3,
            4, 5,

            // Batch 0, Channel 1, Height (3) x Width (2)
            6, 7,
            8, 9,
            10, 11,

            // Batch 1, Channel 0, Height (3) x Width (2)
            12, 13,
            14, 15,
            16, 17,

            // Batch 1, Channel 1, Height (3) x Width (2)
            18, 19,
            20, 21,
            22, 23
        }));

    std::vector<T> expectedOutputValues(
        QuantizedVector<T>(qScale, qOffset,
        {
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 1, 0,
            0, 2, 3, 0,
            0, 4, 5, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 6, 7, 0,
            0, 8, 9, 0,
            0, 10, 11, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 12, 13, 0,
            0, 14, 15, 0,
            0, 16, 17, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 18, 19, 0,
            0, 20, 21, 0,
            0, 22, 23, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0
        }));

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

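    // (padBefore, padAfter) per dimension: batch 2 -> 4, channels 2 -> 5, height 3 -> 7, width 2 -> 4.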
    std::vector<std::pair<unsigned int, unsigned int>> PadList;
    PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));

    descriptor.m_Parameters.m_PadList = PadList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

LayerTestResult<uint8_t, 2> PadUint82dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Pad2dTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<uint8_t, 3> PadUint83dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Pad3dTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> PadUint84dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Pad4dTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<float, 2> PadFloat322dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Pad2dTestCommon<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<float, 3> PadFloat323dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Pad3dTestCommon<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<float, 4> PadFloat324dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Pad4dTestCommon<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<float, 4> L2Normalization1dTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Width: 1
    // Height: 1
    // Channels: 10
    // BatchSize: 1

    const armnn::TensorShape inputOutputShape{ 1, 10, 1, 1 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        1.0f,

        // Batch 0, Channel 1, Height (1) x Width (1)
        2.0f,

        // Batch 0, Channel 2, Height (1) x Width (1)
        3.0f,

        // Batch 0, Channel 3, Height (1) x Width (1)
        4.0f,

        // Batch 0, Channel 4, Height (1) x Width (1)
        5.0f,

        // Batch 0, Channel 5, Height (1) x Width (1)
        6.0f,

        // Batch 0, Channel 6, Height (1) x Width (1)
        7.0f,

        // Batch 0, Channel 7, Height (1) x Width (1)
        8.0f,

        // Batch 0, Channel 8, Height (1) x Width (1)
        9.0f,

        // Batch 0, Channel 9, Height (1) x Width (1)
        10.0f
    };
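    // 1 / sqrt(1^2 + 2^2 + ... + 10^2) = 1 / sqrt(385)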
    const float approxInvL2Norm = 0.050964719f;
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channels 0-9, Height (1) x Width (1)
        1.0f * approxInvL2Norm,
        2.0f * approxInvL2Norm,
        3.0f * approxInvL2Norm,
        4.0f * approxInvL2Norm,
        5.0f * approxInvL2Norm,
        6.0f * approxInvL2Norm,
        7.0f * approxInvL2Norm,
        8.0f * approxInvL2Norm,
        9.0f * approxInvL2Norm,
        10.0f * approxInvL2Norm
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> L2Normalization1dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Width: 1
    // Height: 1
    // Channels: 10
    // BatchSize: 1

    const armnn::TensorShape inputOutputShape{ 1, 1, 1, 10 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (1) x Channel (10)
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f
    };
    const float approxInvL2Norm = 0.050964719f;
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (1) x Channel (10)
        1.0f * approxInvL2Norm,
        2.0f * approxInvL2Norm,
        3.0f * approxInvL2Norm,
        4.0f * approxInvL2Norm,
        5.0f * approxInvL2Norm,
        6.0f * approxInvL2Norm,
        7.0f * approxInvL2Norm,
        8.0f * approxInvL2Norm,
        9.0f * approxInvL2Norm,
        10.0f * approxInvL2Norm
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> L2Normalization2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Width: 5
    // Height: 1
    // Channels: 2
    // BatchSize: 1

    const armnn::TensorShape inputOutputShape{ 1, 2, 1, 5 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (5)
        1.0f, 3.0f, 5.0f, 7.0f, 9.0f,

        // Batch 0, Channel 1, Height (1) x Width (5)
        2.0f, 4.0f, 6.0f, 8.0f, 10.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (5)
        1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),

        // Batch 0, Channel 1, Height (1) x Width (5)
        2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> L2Normalization2dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Width: 5
    // Height: 1
    // Channels: 2
    // BatchSize: 1

    const armnn::TensorShape inputOutputShape{ 1, 1, 5, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (5) x Channel (2)
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (5) x Channel (2)
        1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> L2Normalization3dTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Width: 3
    // Height: 4
    // Channels: 2
    // BatchSize: 1

    const armnn::TensorShape inputOutputShape{ 1, 2, 4, 3 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f,  21.0f, 150.0f,
        149.0f,  32.0f, 179.0f,
         15.0f, 227.0f, 141.0f,
        147.0f, 199.0f, 220.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f, 140.0f,  73.0f,
        211.0f, 212.0f,  89.0f,
         24.0f, 138.0f, 188.0f,
        162.0f,  12.0f, 161.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
         21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
        150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
         32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
        179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
         15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
         73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
         89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
         24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
         12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
        161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> L2Normalization3dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Width: 3
    // Height: 4
    // Channels: 2
    // BatchSize: 1

    const armnn::TensorShape inputOutputShape{ 1, 4, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (3) x Channel (2)
        119.0f, 110.0f,
         21.0f, 140.0f,
        150.0f,  73.0f,

        // Batch 0, Height 1, Width (3) x Channel (2)
        149.0f, 211.0f,
         32.0f, 212.0f,
        179.0f,  89.0f,

        // Batch 0, Height 2, Width (3) x Channel (2)
         15.0f,  24.0f,
        227.0f, 138.0f,
        141.0f, 188.0f,

        // Batch 0, Height 3, Width (3) x Channel (2)
        147.0f, 162.0f,
        199.0f,  12.0f,
        220.0f, 161.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (3) x Channel (2)
        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
         21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
        140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
        150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
         73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),

        // Batch 0, Height 1, Width (3) x Channel (2)
        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
         32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
        212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
        179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
         89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),

        // Batch 0, Height 2, Width (3) x Channel (2)
         15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
         24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),

        // Batch 0, Height 3, Width (3) x Channel (2)
        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
         12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
        161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
}
4166
4167LayerTestResult<float, 4> L2Normalization4dTest(armnn::IWorkloadFactory& workloadFactory)
4168{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004169 // Width: 3
4170 // Height: 4
4171 // Channels: 3
4172 // BatchSize: 2
telsoa014fcda012018-03-09 14:13:49 +00004173
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004174 const armnn::TensorShape inputOutputShape{ 2, 3, 4, 3 };
4175 std::vector<float> inputValues
4176 {
4177 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004178 235.0f, 46.0f, 178.0f,
4179 100.0f, 123.0f, 19.0f,
4180 172.0f, 74.0f, 250.0f,
4181 6.0f, 195.0f, 80.0f,
4182
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004183 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004184 113.0f, 95.0f, 202.0f,
4185 77.0f, 114.0f, 71.0f,
4186 122.0f, 246.0f, 166.0f,
4187 82.0f, 28.0f, 37.0f,
4188
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004189 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004190 56.0f, 170.0f, 162.0f,
4191 194.0f, 89.0f, 254.0f,
4192 12.0f, 209.0f, 200.0f,
4193 1.0f, 64.0f, 54.0f,
4194
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004195 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004196 67.0f, 90.0f, 49.0f,
4197 7.0f, 163.0f, 18.0f,
4198 25.0f, 117.0f, 103.0f,
4199 247.0f, 59.0f, 189.0f,
4200
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004201 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004202 239.0f, 104.0f, 199.0f,
4203 17.0f, 124.0f, 153.0f,
4204 222.0f, 217.0f, 75.0f,
4205 32.0f, 126.0f, 21.0f,
4206
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004207 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004208 97.0f, 145.0f, 215.0f,
4209 115.0f, 116.0f, 238.0f,
4210 226.0f, 16.0f, 132.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004211 92.0f, 125.0f, 88.0f
4212 };
4213 std::vector<float> expectedOutputValues
4214 {
4215 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004216 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4217 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4218 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
4219 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4220 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4221 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4222 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4223 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4224 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4225 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4226 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4227 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4228
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004229 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004230 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4231 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4232 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
4233 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4234 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4235 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4236 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4237 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4238 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 0, Channel 2, Height (4) x Width (3)
        56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 1, Channel 0, Height (4) x Width (3)
        67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),

        // Batch 1, Channel 2, Height (4) x Width (3)
        97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
}

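// The expected values in the L2 normalization tests above and below are built with the
// CalcInvL2Norm helper used throughout this file. From its use here it evidently returns
// the reciprocal of the Euclidean (L2) norm of the values gathered across the channel
// dimension:
//
//     CalcInvL2Norm({ x0, ..., xn }) = 1 / sqrt(x0^2 + ... + xn^2)
//
// so each output element is input / ||across-channel vector||. For example, the first
// NHWC element below is 235 / sqrt(235^2 + 113^2 + 56^2) ~= 235 / 266.7 ~= 0.881.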
LayerTestResult<float, 4> L2Normalization4dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Width: 3
    // Height: 4
    // Channels: 3
    // BatchSize: 2

    const armnn::TensorShape inputOutputShape{ 2, 4, 3, 3 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (3) x Channel (3)
        235.0f, 113.0f, 56.0f,
        46.0f, 95.0f, 170.0f,
        178.0f, 202.0f, 162.0f,

        // Batch 0, Height 1, Width (3) x Channel (3)
        100.0f, 77.0f, 194.0f,
        123.0f, 114.0f, 89.0f,
        19.0f, 71.0f, 254.0f,

        // Batch 0, Height 2, Width (3) x Channel (3)
        172.0f, 122.0f, 12.0f,
        74.0f, 246.0f, 209.0f,
        250.0f, 166.0f, 200.0f,

        // Batch 0, Height 3, Width (3) x Channel (3)
        6.0f, 82.0f, 1.0f,
        195.0f, 28.0f, 64.0f,
        80.0f, 37.0f, 54.0f,

        // Batch 1, Height 0, Width (3) x Channel (3)
        67.0f, 239.0f, 97.0f,
        90.0f, 104.0f, 145.0f,
        49.0f, 199.0f, 215.0f,

        // Batch 1, Height 1, Width (3) x Channel (3)
        7.0f, 17.0f, 115.0f,
        163.0f, 124.0f, 116.0f,
        18.0f, 153.0f, 238.0f,

        // Batch 1, Height 2, Width (3) x Channel (3)
        25.0f, 222.0f, 226.0f,
        117.0f, 217.0f, 16.0f,
        103.0f, 75.0f, 132.0f,

        // Batch 1, Height 3, Width (3) x Channel (3)
        247.0f, 32.0f, 92.0f,
        59.0f, 126.0f, 125.0f,
        189.0f, 21.0f, 88.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (3) x Channel (3)
        235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),

        // Batch 0, Height 1, Width (3) x Channel (3)
        100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),

        // Batch 0, Height 2, Width (3) x Channel (3)
        172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),

        // Batch 0, Height 3, Width (3) x Channel (3)
        6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
        37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
        54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 1, Height 0, Width (3) x Channel (3)
        67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),

        // Batch 1, Height 1, Width (3) x Channel (3)
        7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),

        // Batch 1, Height 2, Width (3) x Channel (3)
        25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),

        // Batch 1, Height 3, Width (3) x Channel (3)
        247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
        21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
        88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
}

template <typename T>
LayerTestResult<T, 4> ConstantTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                       float qScale,
                                       int32_t qOffset)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::GetDataType<T>());

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }
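    // For QuantisedAsymm8 tensors these parameters define the usual affine mapping
    // (real ~= scale * (quantized - offset)), so real 0 maps exactly onto the integer
    // 'offset' and each integer step corresponds to 'scale' in real units.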

    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f,
    })));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;

    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);

    outputHandle->Allocate();

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ConstantTest(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> ConstantTestUint8(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<uint8_t, 3> MergerUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the merger operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; // Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; // Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
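    // The two views tile the output along the channel axis: input1 (2 channels) is
    // written starting at channel 0, and input2 (1 channel) at channel 2, which
    // together produce the 3-channel expected output above.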

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<uint8_t, 4> AdditionUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 1;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    const float scale = 7.0f;
    const int32_t offset = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    const unsigned int shape[] = { batchSize, channels, height, width };
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);

    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    // See dequantized values to the right.
    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
    {
        63, 35, 77, 70, 56, 112,  //  420, 224,  518,  469,  371, 763
        203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
    }));

    // See dequantized values to the right.
    auto input2 = MakeTensor<uint8_t, 4>(inputTensorInfo2, std::vector<uint8_t>(
    {
        21, 7, 175, 231, 175, 210, // 126,   28, 1204, 1596, 1204, 1449
        126, 161, 63, 21, 105, 126 // 861, 1106,  420,  126,  714,  861
    }));

    // See dequantized values to the right.
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>(
    {
        81, 39, 249, 255, 228, 255,   // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
        255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
    }));
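    // Worked example of the quantized arithmetic above: with real = scale * (q - offset),
    // the first pair dequantizes to 7 * (63 - 3) = 420 and 7 * (21 - 3) = 126; their sum,
    // 546, requantizes to 546 / 7 + 3 = 81. Sums above 7 * (255 - 3) = 1764 cannot be
    // represented and clamp to 255, as in the entries marked "(clamped)".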

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

namespace
{
LayerTestResult<uint8_t, 4> MultiplicationUint8TestHelper(armnn::IWorkloadFactory& workloadFactory,
                                                          const unsigned int shape0[4],
                                                          const std::vector<uint8_t>& values0,
                                                          float scale0,
                                                          int32_t offset0,
                                                          const unsigned int shape1[4],
                                                          const std::vector<uint8_t>& values1,
                                                          float scale1,
                                                          int32_t offset1,
                                                          const unsigned int outShape[4],
                                                          const std::vector<uint8_t>& outValues,
                                                          float outScale,
                                                          int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1(4, shape1, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo outputTensorInfo(4, outShape, armnn::DataType::QuantisedAsymm8);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<uint8_t, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, values1);

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<uint8_t, 4> MultiplicationUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 1;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;
    const unsigned int shape[] = { batchSize, channels, height, width };

    // See dequantized values to the right.
    std::vector<uint8_t> input0({
        62, 37, 3, 172, 13, 111, // 244, 144,   8, 684, 48, 440,
        188, 20, 73, 31, 23, 31  // 748,  76, 288, 120, 88, 120
    });

    // See dequantized values to the right.
    std::vector<uint8_t> input1({
        126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
        48, 115, 151, 79, 78, 97      // 150, 351, 459, 243, 240, 297
    });

    // See dequantized values to the right.
    std::vector<uint8_t> output(
    {
        64, 72, 0, 255, 8, 236, //  93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
        77, 15, 92, 16, 10, 21, // 112200,  26676, 132192, 29160, 21120, 35640
    });
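    // Worked example: the first elements dequantize to 4 * (62 - 1) = 244 and
    // 3 * (126 - (-2)) = 384; their product is 93696, which requantizes to
    // 93696 / 1366.255 + (-5) ~= 64. Requantized results outside [0, 255] clamp,
    // as marked above.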

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape,
                                         input0,
                                         4.0f,
                                         1,
                                         shape,
                                         input1,
                                         3.0f,
                                         -2,
                                         shape,
                                         output,
                                         1366.255f, // Scale/offset chosen to have output values out of range.
                                         -5);
}

LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<uint8_t> input0({
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12
    });

    std::vector<uint8_t> input1({ 2 });

    std::vector<uint8_t> output({
        2, 4, 6, 8, 10, 12,
        14, 16, 18, 20, 22, 24
    });

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape0,
                                         input0,
                                         1.0f,
                                         0,
                                         shape1,
                                         input1,
                                         1.0f,
                                         0,
                                         shape0,
                                         output,
                                         1.0f,
                                         0);
}

LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 3 };

    std::vector<uint8_t> input0({
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12
    });

    std::vector<uint8_t> input1({ 1, 2, 3 });

    std::vector<uint8_t> output({
        1, 4, 9, 4, 10, 18,
        7, 16, 27, 10, 22, 36
    });

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape0,
                                         input0,
                                         1.0f,
                                         0,
                                         shape1,
                                         input1,
                                         1.0f,
                                         0,
                                         shape0,
                                         output,
                                         1.0f,
                                         0);
}

namespace
{
template <typename T>
LayerTestResult<T, 4> SubtractionTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                            const unsigned int shape0[4],
                                            const std::vector<T>& values0,
                                            float scale0,
                                            int32_t offset0,
                                            const unsigned int shape1[4],
                                            const std::vector<T>& values1,
                                            float scale1,
                                            int32_t offset1,
                                            const unsigned int outShape[4],
                                            const std::vector<T>& outValues,
                                            float outScale,
                                            int32_t outOffset)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<uint8_t, 4> SubtractionUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 2 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 1, 2, 1, 2 });
    std::vector<uint8_t> output({ 3, 3, 5, 5 });
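    // With scale 0.5 and offset 2, input0 dequantizes to { 4, 5, 6, 7 }; input1 uses
    // scale 1 and offset 0, so the real differences are { 3, 3, 5, 5 }, which the
    // output quantization (scale 1, offset 0) maps straight back to { 3, 3, 5, 5 }.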

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 0.5f, 2,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 2 });
    std::vector<uint8_t> output({ 5, 6, 7, 8 });
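    // The single-element input1 is broadcast across all four positions: dequantized
    // input0 { 4, 5, 6, 7 } minus 2 gives { 2, 3, 4, 5 }, and the output offset of 3
    // shifts the quantized result to { 5, 6, 7, 8 }.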

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 0.5f, 2,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 3);
}

LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 1 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 2, 1 });
    std::vector<uint8_t> output({ 8, 11, 12, 15 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 2 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 1, -1, 0, 2 });
    std::vector<float> output({ 0, 3, 3, 2 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 10 });
    std::vector<float> output({ -9, -8, -7, -6 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 2 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 10, -5 });
    std::vector<float> output({ -9, 7, -7, 9 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.1567f);
    inputTensorInfo.SetQuantizationOffset(1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.1567f);
    outputTensorInfo.SetQuantizationOffset(1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 255,
        200, 250
    }));

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
    // the centre).
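    // Concretely, with a 2x2 input and a 1x1 output the scale factor is 2 in each
    // dimension; output texel (0,0) therefore projects exactly onto input (0,0),
    // whose quantized value 1 is copied through unchanged.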
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(3.141592f);
    inputTensorInfo.SetQuantizationOffset(3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(3.141592f);
    outputTensorInfo.SetQuantizationOffset(3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3,
        3, 5
    }));
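    // With a scale factor of exactly 2, each output texel projects onto an input texel
    // with no fractional part, so the expected values are simply the input samples at
    // even coordinates: (0,0), (0,2), (2,0) and (2,2).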

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 2;
    constexpr unsigned int outputHeight = 1;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, // 3.0, 4.5, 6.0
        5, 8, 13 // 9.0, 13.5, 21.0
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3 // 3.0, 5.25
    }));
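    // The width scale factor here is 3/2 = 1.5: output (0,0) projects onto input x = 0
    // (exactly 3.0), while output (0,1) projects onto x = 1.5, halfway between 4.5 and
    // 6.0, giving 5.25, which requantizes to 5.25 / 1.5 - 1 = 2.5 and rounds to 3.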

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 3;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 5;
    constexpr unsigned int outputHeight = 3;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.010765f);
    inputTensorInfo.SetQuantizationOffset(7);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.010132f);
    outputTensorInfo.SetQuantizationOffset(-18);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        24, 228, // 0.183005, 2.379065,
        105, 128, // 1.05497, 1.302565
        230, 71 // 2.400595, 0.68896
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        0, 87, 173, 217, 217,  // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
        86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
        219, 151, 84, 50, 50   // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
    }));
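    // Magnification along the width: the x scale factor is 2/5 = 0.4, so output column j
    // samples input x = 0.4 * j. For row 0, column 1: 0.183 + 0.4 * (2.379 - 0.183) ~= 1.061.
    // Columns 3 and 4 project past the last texel (x = 1.2 and 1.6) and clamp to its value,
    // which is why the final two entries of each row repeat.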

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> BatchNormTest(armnn::IWorkloadFactory& workloadFactory)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

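    // BatchNormTestImpl applies the usual per-channel batch norm transform,
    // y = gamma * (x - mean) / sqrt(var + eps) + beta, with parameters chosen inside
    // the implementation; here channel 0 passes through unchanged while channel 1 is
    // remapped linearly (1 -> 3, 4 -> 4, -2 -> 2).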
5399 return BatchNormTestImpl<float>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
5400 0.f, 0, armnn::DataLayout::NCHW);
5401}
5402
5403LayerTestResult<float, 4> BatchNormNhwcTest(armnn::IWorkloadFactory& workloadFactory)
5404{
5405 // BatchSize: 1
5406 // Height: 3
5407 // Width: 2
5408 // Channels: 2
5409
5410 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
5411 std::vector<float> inputValues
5412 {
5413 // Batch 0, Height 0, Width (2) x Channel (2)
5414 1.f, 1.f,
5415 4.f, 1.f,
5416
5417 // Batch 0, Height 1, Width (2) x Channel (2)
5418 4.f, 4.f,
5419 2.f, 1.f,
5420
5421 // Batch 0, Height 2, Width (2) x Channel (2)
5422 1.f, -2.f,
5423 6.f, 4.f
5424 };
5425 std::vector<float> expectedOutputValues
5426 {
5427 // Batch 0, Height 0, Width (2) x Channel (2)
5428 1.f, 3.f,
5429 4.f, 3.f,
5430
5431 // Batch 0, Height 1, Width (2) x Channel (2)
5432 4.f, 4.f,
5433 2.f, 3.f,
5434
5435 // Batch 0, Height 2, Width (2) x Channel (2)
5436 1.f, 2.f,
5437 6.f, 4.f
5438 };
5439
5440 return BatchNormTestImpl<float>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
5441 0.f, 0, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00005442}
5443
5444LayerTestResult<uint8_t, 4> BatchNormUint8Test(armnn::IWorkloadFactory& workloadFactory)
5445{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01005446 // BatchSize: 1
5447 // Channels: 2
5448 // Height: 3
5449 // Width: 2
5450
5451 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
5452 std::vector<float> inputValues
5453 {
5454 // Batch 0, Channel 0, Height (3) x Width (2)
5455 1.f, 4.f,
5456 4.f, 2.f,
5457 1.f, 6.f,
5458
5459 // Batch 0, Channel 1, Height (3) x Width (2)
5460 1.f, 1.f,
5461 4.f, 1.f,
5462 -2.f, 4.f
5463 };
5464 std::vector<float> expectedOutputValues
5465 {
5466 // Batch 0, Channel 0, Height (3) x Width (2)
5467 1.f, 4.f,
5468 4.f, 2.f,
5469 1.f, 6.f,
5470
5471 // Batch 0, Channel 1, Height (3) x Width (2)
5472 3.f, 3.f,
5473 4.f, 3.f,
5474 2.f, 4.f
5475 };
5476
5477 return BatchNormTestImpl<uint8_t>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
5478 1.f/20.f, 50, armnn::DataLayout::NCHW);
5479}
5480
5481LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(armnn::IWorkloadFactory& workloadFactory)
5482{
5483 // BatchSize: 1
5484 // Height: 3
5485 // Width: 2
5486 // Channels: 2
5487
5488 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
5489 std::vector<float> inputValues
5490 {
5491 // Batch 0, Height 0, Width (2) x Channel (2)
5492 1.f, 1.f,
5493 4.f, 1.f,
5494
5495 // Batch 0, Height 1, Width (2) x Channel (2)
5496 4.f, 4.f,
5497 2.f, 1.f,
5498
5499 // Batch 0, Height 2, Width (2) x Channel (2)
5500 1.f, -2.f,
5501 6.f, 4.f
5502 };
5503 std::vector<float> expectedOutputValues
5504 {
5505 // Batch 0, Height 0, Width (2) x Channel (2)
5506 1.f, 3.f,
5507 4.f, 3.f,
5508
5509 // Batch 0, Height 1, Width (2) x Channel (2)
5510 4.f, 4.f,
5511 2.f, 3.f,
5512
5513 // Batch 0, Height 2, Width (2) x Channel (2)
5514 1.f, 2.f,
5515 6.f, 4.f
5516 };
5517
5518 return BatchNormTestImpl<uint8_t>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
5519 1.f/20.f, 50, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00005520}
5521
5522LayerTestResult<uint8_t, 4> ConstantUint8Test(armnn::IWorkloadFactory& workloadFactory)
5523{
5524 return ConstantTestImpl<uint8_t>(workloadFactory, 2e-6f, 1);
5525}
5526
5527LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(armnn::IWorkloadFactory& workloadFactory)
5528{
5529 return Concatenation1dTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5530}
5531
5532LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5533{
5534 return Concatenation2dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5535}
5536
5537LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5538{
5539 return Concatenation2dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5540}
5541
5542LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
5543{
5544 return Concatenation2dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5545}
5546
5547LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
5548{
5549 return Concatenation2dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5550}
5551
5552LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5553{
5554 return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5555}
5556
5557LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5558{
5559 return Concatenation3dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5560}
5561
5562LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5563{
5564 return Concatenation3dDim2TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5565}
5566
5567LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
5568{
5569 return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5570}
5571
5572LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
5573{
5574 return Concatenation3dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5575}
5576
5577LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
5578{
5579 return Concatenation3dDim2DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5580}
5581
5582LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
5583 bool forceNoPadding)
5584{
5585 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
5586}
5587
5588LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory& workloadFactory,
5589 bool forceNoPadding)
5590{
5591 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<uint8_t>(workloadFactory, forceNoPadding, 3.0f, -5);
5592}
5593
5594LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory& workloadFactory,
5595 bool forceNoPadding)
5596{
5597 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<float>(workloadFactory, forceNoPadding);
5598}
5599
5600LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory& workloadFactory,
5601 bool forceNoPadding)
5602{
5603 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<uint8_t>(workloadFactory, forceNoPadding, 0.1f, 128);
5604}
5605
James Conroy45a9b772018-10-31 11:47:53 +00005606LayerTestResult<float, 4> SimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory,
5607 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005608{
James Conroy45a9b772018-10-31 11:47:53 +00005609 return SimpleMaxPooling2dTestCommon<float>(workloadFactory, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00005610}
5611
James Conroy45a9b772018-10-31 11:47:53 +00005612LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
5613 const armnn::DataLayoutIndexed& dataLayout)
Francis Murtagh043d0d02018-10-05 14:08:48 +01005614{
James Conroy45a9b772018-10-31 11:47:53 +00005615 return SimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, dataLayout);
Francis Murtagh043d0d02018-10-05 14:08:48 +01005616}
5617
James Conroy45a9b772018-10-31 11:47:53 +00005618LayerTestResult<float, 4> SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory,
5619 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005620{
James Conroy45a9b772018-10-31 11:47:53 +00005621 return SimpleAveragePooling2dTestCommon<float>(workloadFactory, dataLayout);
James Conroy69482272018-10-19 10:41:35 +01005622}
5623
James Conroy45a9b772018-10-31 11:47:53 +00005624LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
5625 const armnn::DataLayoutIndexed& dataLayout)
James Conroy69482272018-10-19 10:41:35 +01005626{
James Conroy45a9b772018-10-31 11:47:53 +00005627 return SimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00005628}
5629
surmeh01bceff2f2018-03-29 16:29:27 +01005630LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
5631 bool forceNoPadding)
5632{
5633 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
5634}
5635
telsoa014fcda012018-03-09 14:13:49 +00005636LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
5637{
5638 return LargeTensorsAveragePooling2dTestCommon<float>(workloadFactory);
5639}
5640
LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return LargeTensorsAveragePooling2dTestCommon<uint8_t>(workloadFactory, 0.5, -1);
}

LayerTestResult<float, 4> SimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                const armnn::DataLayoutIndexed& dataLayout)
{
    return SimpleL2Pooling2dTestCommon<float>(workloadFactory, dataLayout);
}

LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                       const armnn::DataLayoutIndexed& dataLayout)
{
    return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, dataLayout);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride1TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride1TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride4TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride4TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize7Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize7TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize7TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize9Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize9TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize9TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AsymmetricNonSquarePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AsymmetricNonSquarePooling2dTestCommon<uint8_t>(workloadFactory);
}

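// The Compare variants run the same pooling on workloadFactory and on
// refWorkloadFactory (normally the reference backend) and check that the two
// results agree.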
LayerTestResult<float, 4> ComparePooling2dTest(armnn::IWorkloadFactory& workloadFactory,
                                               armnn::IWorkloadFactory& refWorkloadFactory,
                                               armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<float>(workloadFactory, refWorkloadFactory, poolingType);
}

LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                      armnn::IWorkloadFactory& refWorkloadFactory,
                                                      armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<uint8_t>(workloadFactory, refWorkloadFactory, poolingType, 0.1f, 128);
}

LayerTestResult<float, 2> FullyConnectedLargeTest(armnn::IWorkloadFactory& workloadFactory,
                                                  bool transposeWeights)
{
    return FullyConnectedLargeTestCommon<float>(workloadFactory, transposeWeights);
}

LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, 1.0f, -5);
}

LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<uint8_t>(workloadFactory, 1.0f, -5);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> SimplePermuteFloat32Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimplePermuteFloat32TestCommon(workloadFactory);
}

LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimplePermuteUint8TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet1TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet2TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet3TestCommon(workloadFactory);
}

namespace
{

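// Runs a Mean workload over inputData and compares the result with
// outputData. InputDim and OutputDim are the tensor ranks, axis and keepDims
// populate the descriptor's m_Axis and m_KeepDims, and scale/offset provide
// the quantisation info used when T is uint8_t.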
template <typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> MeanTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                             const unsigned int* inputShape,
                                             const std::vector<T>& inputData,
                                             const std::vector<unsigned int>& axis,
                                             bool keepDims,
                                             const unsigned int* outputShape,
                                             const std::vector<T>& outputData,
                                             float scale = 1.0f,
                                             int32_t offset = 0)
{
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MeanQueueDescriptor data;
    data.m_Parameters.m_Axis = axis;
    data.m_Parameters.m_KeepDims = keepDims;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());

    return result;
}

} // anonymous namespace
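
// A minimal usage sketch for the helper above (illustrative only, not a
// registered test; 'factory' stands for any IWorkloadFactory): reducing a 2x2
// float tensor over axis 1 without keeping the reduced dimension would be
//     const unsigned int in[]  = { 2, 2 };
//     const unsigned int out[] = { 2 };
//     MeanTestHelper<float, 2, 1>(factory, in, { 1.0f, 3.0f, 2.0f, 4.0f }, { 1 }, false, out, { 2.0f, 3.0f });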
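// An empty axis list reduces over every dimension, so mean(1, 1, 2, 2, 3, 3) = 2.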
LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 3, 2 };
    const unsigned int outputShape[] = { 1 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2 });

    return MeanTestHelper<uint8_t, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
}

LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 2 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2, 2 });

    return MeanTestHelper<uint8_t, 4, 3>(workloadFactory, inputShape, input, { 2 }, false, outputShape, output);
}

LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2, 2 });

    return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, { 2 }, true, outputShape, output);
}

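// Reducing over axes 0 and 3 gives true means of 1.5, 3.5 and 5.5; the
// expected uint8 outputs below are those values rounded down to 1, 3 and 5.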
LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 1, 3, 1, 1 };

    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
    std::vector<uint8_t> output({ 1, 3, 5 });

    return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, { 0, 3 }, true, outputShape, output);
}

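// Input and output share the same quantisation parameters (scale 0.8,
// offset 5), so averaging the raw quantised values matches averaging the
// dequantised ones: element 0 averages { 1, 3, ..., 23 } to 12 and element 1
// averages { 2, 4, ..., 24 } to 13.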
LayerTestResult<uint8_t, 1> MeanVtsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 2 };

    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
                                 24 });
    std::vector<uint8_t> output({ 12, 13 });

    return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, inputShape, input, { 0, 1 }, false, outputShape,
                                         output, 0.8f, 5);
}

LayerTestResult<float, 1> MeanFloatSimpleTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 3, 2 };
    const unsigned int outputShape[] = { 1 };

    std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
    std::vector<float> output({ 2.0f });

    return MeanTestHelper<float, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
}

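// The two 3x1x2 batches are identical, so reducing over axis 0 simply
// reproduces one batch.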
LayerTestResult<float, 3> MeanFloatSimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 3, 1, 2 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
    std::vector<float> output({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });

    return MeanTestHelper<float, 4, 3>(workloadFactory, inputShape, input, { 0 }, false, outputShape, output);
}

LayerTestResult<float, 4> MeanFloatKeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };

    std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
    std::vector<float> output({ 2.0f, 2.0f });

    return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, { 2 }, true, outputShape, output);
}

LayerTestResult<float, 4> MeanFloatMultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 1, 3, 1, 1 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
    std::vector<float> output({ 1.5f, 3.5f, 5.5f });

    return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, { 0, 3 }, true, outputShape, output);
}

LayerTestResult<float, 1> MeanVtsFloat1Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 2 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f,
                               14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
    std::vector<float> output({ 12.0f, 13.0f });

    return MeanTestHelper<float, 3, 1>(workloadFactory, inputShape, input, { 0, 1 }, false, outputShape, output);
}

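// Worked example for the expected values below: with keepDims set, the
// { 4, 3, 2 } input is reduced over axes 0 and 2, so middle-axis element j
// averages the eight values input[i][j][k]; for j = 0 that is
// (1 + 2 + 7 + 8 + 13 + 14 + 19 + 20) / 8 = 10.5.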
LayerTestResult<float, 3> MeanVtsFloat2Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 1, 3, 1 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f,
                               14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
    std::vector<float> output({ 10.5f, 12.5f, 14.5f });

    return MeanTestHelper<float, 3, 3>(workloadFactory, inputShape, input, { 0, 2 }, true, outputShape, output);
}

LayerTestResult<float, 3> MeanVtsFloat3Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 2, 2, 1 };
    const unsigned int outputShape[] = { 1, 2, 1 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f });
    std::vector<float> output({ 1.5f, 3.5f });

    return MeanTestHelper<float, 4, 3>(workloadFactory, inputShape, input, { 2 }, false, outputShape, output);
}

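// Chains two workloads: the output handle of a MaxPool feeds directly into an
// Addition, checking that one workload's result can be consumed by another.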
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Create the initial tensor:
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<float>());
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<float>());

    boost::multi_array<float, 4> poolingInput = MakeTensor<float, 4>(poolingInputTensorInfo,
                                                                     {1, 2, 3,
                                                                      4, 5, 6,
                                                                      7, 8, 9
                                                                     });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool with poolSize = 1x1 and stride = 2x2, which samples the corners:
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool workload.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    // Create an addition with a second tensor of the same size. Its values
    // match the result of applying a Conv2d with a 2x2 kernel of ones and
    // stride 1x1 to the initial tensor:
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<float>());
    armnn::TensorInfo addOutputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<float>());

    boost::multi_array<float, 4> addInput = MakeTensor<float, 4>(addInputTensorInfo,
                                                                 {12, 16,
                                                                  24, 28
                                                                 });

    // Expected output tensor after MaxPool and Addition.
    LayerTestResult<float, 4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
        {
            13, 19,
            31, 37
        }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the second tensor.
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    // The Addition consumes the MaxPool's output handle directly, so the two
    // workloads can be executed back to back.
    workloadFactory.Finalize();
    workload->Execute();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}