//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "LayerTests.hpp"

#include "test/TensorHelpers.hpp"
#include "TensorCopyUtils.hpp"
#include "Permute.hpp"

#include <boost/test/unit_test.hpp>
#include <boost/assert.hpp>

#include <armnn/LayerSupport.hpp>

#include <backends/CpuTensorHandle.hpp>
#include <backends/WorkloadFactory.hpp>

#include <algorithm>
#include <boost/cast.hpp>

#include "WorkloadTestUtils.hpp"
#include "Conv2dTestImpl.hpp"
#include "BatchNormTestImpl.hpp"
#include "ActivationTestImpl.hpp"
#include "Pooling2dTestImpl.hpp"
#include "ReshapeTestImpl.hpp"
#include "FullyConnectedTestImpl.hpp"
#include "SplitterTestImpl.hpp"
#include "SoftmaxTestImpl.hpp"
#include "NormTestImpl.hpp"
#include "PermuteTestImpl.hpp"
#include "LstmTestImpl.hpp"
#include "ConvertFp16ToFp32TestImpl.hpp"
#include "ConvertFp32ToFp16TestImpl.hpp"

// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});

// 2-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias2({0, 2});

// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
template<typename T>
boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
{
    if (biasEnabled)
    {
        armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, armnn::GetDataType<T>());
        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
        return bias;
    }
    else
    {
        return boost::multi_array<T, 1>();
    }
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       float qScale,
                                                       int32_t qOffset,
                                                       bool biasEnabled,
                                                       const armnn::DataLayoutIndexed& layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x4 image.
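    // With no padding and stride 1, each output dimension is (input - kernel) + 1:
    // height = (8 - 5) + 1 = 4, width = (16 - 3) + 1 = 14, giving the {1, 2, 4, 14} shape below.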
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout);
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       float qScale,
                                                       int32_t qOffset,
                                                       bool biasEnabled,
                                                       const armnn::DataLayoutIndexed& layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
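    // Height = (8 - 3) + 1 = 6, width = (16 - 3) + 1 = 14 (no padding, stride 1).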
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout);
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                           float qScale,
                                                           int32_t qOffset,
                                                           bool biasEnabled,
                                                           armnn::DataLayout dataLayout)
{
    // Use a single-batch 1-channel 4x3 image (width 4, height 3) in NHWC layout.

    armnn::TensorInfo inputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
                                                      {
                                                          1, 5, 2, 3,
                                                          8, 7, 3, 6,
                                                          3, 3, 9, 1
                                                      });

    // Use 1 batch of a 1-channel 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
                                                           4, 5, 6,
                                                           0, 0, 0,
                                                           3, 2, 1
                                                       });

    // Expected output is 1 batch of a 1-channel 4x3 image, the same size as the input.
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());

    const std::vector<float> outputData =
    {
        23, 41, 33, 21,
        44, 65, 76, 52,
        82, 85, 79, 42
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    return SimpleConvolution2dNhwcTestImpl<T>(workloadFactory,
                                              input,
                                              kernel,
                                              boost::multi_array<T, 1>(),
                                              expectedOutput,
                                              dataLayout,
                                              qScale,
                                              qOffset);
}

LayerTestResult<float, 4> SimpleConvolution2d3x5Test(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled,
                                                     const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2d3x5TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, layout);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled,
                                                            const armnn::DataLayoutIndexed& layout)
{
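    // Asymmetric 8-bit quantisation: with qScale 0.5 and qOffset 50, a stored value q
    // represents the real value 0.5f * (q - 50), which lets the signed test data fit in uint8.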
    return SimpleConvolution2d3x5TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled, layout);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3Test(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled,
                                                     const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2d3x3TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, layout);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(armnn::IWorkloadFactory& workloadFactory,
                                                         bool biasEnabled)
{
    return SimpleConvolution2d3x3NhwcTestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled,
                                                            const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2d3x3TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled, layout);
}

template<typename T>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::DataLayoutIndexed& layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,
            12,22,32,
            13,23,33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,
            -12,-22,
        })));

// Expected output is 1 batch of a 1-channel 6x8 image.
// Manually calculated like this:
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
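// The output size follows from outputDim = inputDim + padBefore + padAfter - kernelDim + 1:
// height = 3 + 2 + 4 - 2 + 1 = 8, width = 3 + 1 + 3 - 2 + 1 = 6, hence the {1, 1, 8, 6} shape.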
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            0, 0, 0, 0, 0, 0,
            -242, -594, -934, -372, 0, 0,
            -495, -1190, -1850, -725, 0, 0,
            -538, -1256, -1916, -748, 0, 0,
            -273, -626, -946, -363, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout,
                                          1,  // Padding left.
                                          2,  // Padding top.
                                          3,  // Padding right.
                                          4); // Padding bottom.
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                     const armnn::DataLayoutIndexed& layout,
                                                                     float qScale,
                                                                     int32_t qOffset)
{
    // Use a single-batch 1-channel 5x5 image as input.
    armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,41,51,
            12,22,32,42,52,
            13,23,33,43,53,
            14,24,34,44,54,
            15,25,35,45,55,
        })));

    // Use 1 batch of a 1-channel 4x4 kernel.
    armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,-31,-41,
            -12,-22,-32,-42,
            -13,-23,-33,-43,
            -14,-24,-34,-44,
        })));

    // Expected output is 1 batch of a 1-channel 5x5 image.
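    // With padding (left=1, top=1, right=2, bottom=2) and a 4x4 kernel at stride 1,
    // each output dimension is 5 + 1 + 2 - 4 + 1 = 5, so the output matches the input size.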
    armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
    std::vector<T> myVec(outputDesc.GetNumElements(), 0);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -7140, -10580, -13940, -9300, -5230,
            -9590, -14120, -18520, -12290, -6860,
            -9980, -14560, -18960, -12560, -7000,
            -7518, -10904, -14144, -9318, -5152,
            -5032, -7256, -9376, -6142, -3368,
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout,
                                          1,  // Padding left.
                                          1,  // Padding top.
                                          2,  // Padding right.
                                          2); // Padding bottom.
}

template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                 float qScale,
                                                                 int32_t qOffset,
                                                                 bool biasEnabled,
                                                                 const armnn::DataLayoutIndexed& layout)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            0, 1, 2, 3, 4,
            5, 6, 7, 8, 9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10, 9,
            8, 7, 6, 5,
            4, 3, 2, 1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the python tensorflow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,
            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dAsymmetricTestImpl<T>(workloadFactory,
                                                       input,
                                                       kernel,
                                                       GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                                       expectedOutput,
                                                       qScale,
                                                       qOffset,
                                                       layout,
                                                       1,  // Padding left.
                                                       1,  // Padding top.
                                                       2,  // Padding right.
                                                       2,  // Padding bottom.
                                                       1,  // strideX
                                                       1); // strideY
}

template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                           float qScale,
                                                           int32_t qOffset,
                                                           bool biasEnabled)
{
    armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            0, 25,
            1, 26,
            2, 27,
            3, 28,
            4, 29,

            5, 30,
            6, 31,
            7, 32,
            8, 33,
            9, 34,

            10, 35,
            11, 36,
            12, 37,
            13, 38,
            14, 39,

            15, 40,
            16, 41,
            17, 42,
            18, 43,
            19, 44,

            20, 45,
            21, 46,
            22, 47,
            23, 48,
            24, 49
        })));

    armnn::TensorInfo kernelTensorInfo({ 1, 4, 4, 2}, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 16,
            31, 15,
            30, 14,
            29, 13,

            28, 12,
            27, 11,
            26, 10,
            25, 9,

            24, 8,
            23, 7,
            22, 6,
            21, 5,

            20, 4,
            19, 3,
            18, 2,
            17, 1
        })));

    armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1550,
            1580, 2284,
            1850, 2362,
            1530, 1955,
            1117, 1428,

            2140, 2910,
            3108, 4206,
            3500, 4342,
            2842, 3528,
            2042, 2536,

            3580, 3390,
            5068, 4886,
            5460, 5022,
            4342, 4068,
            3062, 2916,

            3618, 3566,
            5072, 5056,
            5390, 5182,
            4248, 4133,
            2971, 2922,

            3074, 3100,
            4282, 4352,
            4510, 4452,
            3533, 3517,
            2457, 2465
        })));

    return DepthwiseConvolution2dNhwcTestImpl<T>(workloadFactory,
                                                 input,
                                                 kernel,
                                                 GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                                 expectedOutput,
                                                 qScale,
                                                 qOffset,
                                                 1,  // Padding left.
                                                 1,  // Padding top.
                                                 2,  // Padding right.
                                                 2,  // Padding bottom.
                                                 1,  // strideX
                                                 1); // strideY
}

LayerTestResult<float, 4>
Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(armnn::IWorkloadFactory& workloadFactory,
                                                           const armnn::DataLayoutIndexed& layout)
{
    return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon<float>(workloadFactory, layout, 0.0f, 0);
}

LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(armnn::IWorkloadFactory& workloadFactory,
                                                             const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2dAsymmetricPaddingTestCommon<float>(workloadFactory, layout, 0.0f, 0);
}

LayerTestResult<float, 4> DepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled,
                                                     const armnn::DataLayoutIndexed& layout)
{
    return DepthwiseConvolution2dTestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled, layout);
}

LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(armnn::IWorkloadFactory& workloadFactory,
                                                              bool biasEnabled)
{
    return DepthwiseConvolution2dNhwcTestCommon<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(armnn::IWorkloadFactory& workloadFactory,
                                                              bool biasEnabled)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(armnn::IWorkloadFactory& workloadFactory,
                                                               bool biasEnabled,
                                                               const armnn::DataLayoutIndexed& layout)
{
    return DepthwiseConvolution2dAsymmetricTestCommon<float>(workloadFactory, 0.0f, 0, biasEnabled, layout);
}

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled,
                                                            const armnn::DataLayoutIndexed& layout)
{
    return DepthwiseConvolution2dTestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled, layout);
}

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                                     bool biasEnabled)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

LayerTestResult<float, 4> Convolution1dTest(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
{
    return Convolution1dTestImpl<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> Convolution1dUint8Test(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
{
    return Convolution1dTestImpl<uint8_t>(workloadFactory, 0.1f, 128, biasEnabled);
}

LayerTestResult<float,4> CompareConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                  armnn::IWorkloadFactory& refWorkloadFactory)
{
    return CompareConvolution2dTestImpl<float>(workloadFactory, refWorkloadFactory);
}

template<typename T>
LayerTestResult<T,4> CompareDepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                       armnn::IWorkloadFactory& refWorkloadFactory)
{
    return CompareDepthwiseConvolution2dTestImpl<T>(workloadFactory, refWorkloadFactory);
}

template LayerTestResult<float, 4> CompareDepthwiseConvolution2dTest<float>(
    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&);
template LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dTest<uint8_t>(
    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&);

LayerTestResult<float,4> SimpleNormalizationAcrossTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float,4> SimpleNormalizationWithinTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationNhwcTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float,2> SimpleSoftmaxTest(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    return SimpleSoftmaxTestImpl<float>(workloadFactory, beta);
}

LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    return SimpleSoftmaxTestImpl<uint8_t>(workloadFactory, beta);
}

LayerTestResult<float,4> CompareNormalizationTest(armnn::IWorkloadFactory& workloadFactory,
                                                  armnn::IWorkloadFactory& refWorkloadFactory,
                                                  armnn::NormalizationAlgorithmChannel normChannel,
                                                  armnn::NormalizationAlgorithmMethod normMethod)
{
    return CompareNormalizationTestImpl(workloadFactory, refWorkloadFactory, normChannel, normMethod);
}

LayerTestResult<float,2> CompareSoftmaxTest(armnn::IWorkloadFactory& workloadFactory,
                                            armnn::IWorkloadFactory& refWorkloadFactory,
                                            float beta)
{
    return CompareSoftmaxTestImpl<float>(workloadFactory, refWorkloadFactory, beta);
}

LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                   armnn::IWorkloadFactory& refWorkloadFactory,
                                                   float beta)
{
    return CompareSoftmaxTestImpl<uint8_t>(workloadFactory, refWorkloadFactory, beta);
}

std::vector<LayerTestResult<float,3>> SplitterTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SplitterTestCommon<float>(workloadFactory);
}

std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SplitterTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<float, 3> CopyViaSplitterTest(armnn::IWorkloadFactory& workloadFactory)
{
    return CopyViaSplitterTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return CopyViaSplitterTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
        armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({ 2, 2 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            { 2., 3., 3., 4. }));

    armnn::TensorInfo outputDesc({ 2, 4 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
             -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
        armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
             0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
             -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
             -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
             0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
             -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
             0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
             0.02168f}));
    return LstmLayerFloat32NoCifgWithPeepholeWithProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({2, 2}, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {2., 3., 3., 4.}));

    armnn::TensorInfo outputDesc({2, 4}, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
              -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float,3> MergerTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float,3> ret(outputTensorInfo);

    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,

            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,
        })
    );

    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
        {
            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; // Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
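    // input1 (2 channels) is merged at channel offset 0 and input2 (1 channel) at channel
    // offset 2, so the two views together fill the 3-channel output exactly.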

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<float,4> AdditionTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 2;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
        {
            0.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            1.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 1.0f, 2.0f,

            0.0f, 0.0f, 1.0f,
            0.2f, 1.0f, 2.0f,
        }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
        {
            1.0f, 2.0f, 1.0f,
            0.0f, 1.0f, 2.0f,

            1.0f, 2.0f, -2.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 0.0f, -3.0f,

            0.0f, 0.0f, 1.0f,
            0.7f, 1.0f, 5.0f,
        }));

    LayerTestResult<float,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 4.0f, 2.0f,
            0.2f, 2.0f, 4.0f,

            2.0f, 4.0f, -1.0f,
            0.4f, 2.0f, 4.0f,

            0.0f, 4.0f, 2.0f,
            8.4f, 1.0f, -1.0f,

            0.0f, 0.0f, 2.0f,
            0.9f, 2.0f, 7.0f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template <typename T>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                float qScale,
                                                int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
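    // The size-1 dimensions are broadcast against each other: {1, 3, 2, 1} + {1, 1, 2, 3}
    // yields the {1, 3, 2, 3} output shape, with each input repeated along its singleton axes.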

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template <typename T>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                        float qScale,
                                                        int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f, 1.0f, 2.0f,
            3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f,
            9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
            6.5f, 7.5f, 8.5f,
            9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<float, 4> AdditionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcastTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcastTestImpl<uint8_t>(workloadFactory, 2.f, 0);
}

LayerTestResult<float, 4> AdditionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcast1ElementTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcast1ElementTestImpl<uint8_t>(workloadFactory, 0.1333333f, 128);
}

LayerTestResult<float,4> CompareAdditionTest(armnn::IWorkloadFactory& workloadFactory,
                                             armnn::IWorkloadFactory& refWorkloadFactory)
{
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();
    refWorkloadFactory.Finalize();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}

namespace {
template <typename T>
LayerTestResult<T, 4> DivisionTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                         const unsigned int shape0[4],
                                         const std::vector<T>& values0,
                                         float scale0,
                                         int32_t offset0,
                                         const unsigned int shape1[4],
                                         const std::vector<T>& values1,
                                         float scale1,
                                         int32_t offset1,
                                         const unsigned int outShape[4],
                                         const std::vector<T>& outValues,
                                         float outScale,
                                         int32_t outOffset)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DivisionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<float,4> DivisionByZeroTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<float> input0({
        1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
        -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });

    std::vector<float> input1({
        0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
        0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });

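    // IEEE-754 float semantics: dividing a non-zero value by ±0 gives ±infinity (sign taken
    // from the operands), and 0/0 gives NaN; the expected output encodes exactly that.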
1350 std::vector<float> output({
1351 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
1352 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
1353
David Beck5cd01f32018-09-12 16:00:08 +01001354 return DivisionTestHelper<float>(workloadFactory,
1355 shape, input0, 1.0f, 0,
1356 shape, input1, 1.0f, 0,
1357 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001358}
1359
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001360LayerTestResult<float,4> DivisionTest(armnn::IWorkloadFactory& workloadFactory)
1361{
1362 const unsigned int width = 2;
1363 const unsigned int height = 2;
1364 const unsigned int channelCount = 2;
1365 const unsigned int batchSize = 2;
1366
1367 unsigned int shape[] = { batchSize, channelCount, height, width };
1368
1369 std::vector<float> input0({
1370 2, 2, 2, 2, 3, 3, 3, 3,
1371 4, 4, 4, 4, 5, 5, 5, 5 });
1372
1373 std::vector<float> input1({
1374 1, 1, 1, 1, 2, 2, 2, 2,
1375 4, 4, 4, 4, 4, 4, 4, 4 });
1376
1377 std::vector<float> output({
1378 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
1379 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
1380
David Beck5cd01f32018-09-12 16:00:08 +01001381
1382 return DivisionTestHelper<float>(workloadFactory,
1383 shape, input0, 1.0f, 0,
1384 shape, input1, 1.0f, 0,
1385 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001386}
1387
1388LayerTestResult<float, 4> DivisionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
1389{
1390 unsigned int shape0[] = { 1, 2, 2, 2 };
1391 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1392
1393 unsigned int shape1[] = { 1, 1, 1, 1 };
1394 std::vector<float> input1({ 2 });
1395
1396 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1397
David Beck5cd01f32018-09-12 16:00:08 +01001398
1399 return DivisionTestHelper<float>(workloadFactory,
1400 shape0, input0, 1.0f, 0,
1401 shape1, input1, 1.0f, 0,
1402 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001403}
1404
1405LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
1406{
1407 unsigned int shape0[] = { 1, 3, 3, 2 };
1408 std::vector<float> input0({
1409 1, 4, 3, 8, 5, 12,
1410 7, 16, 9, 20, 11, 24,
1411 13, 28, 15, 32, 17, 36});
1412
1413 unsigned int shape1[] = { 1, 1, 1, 2 };
1414 std::vector<float> input1({ 1, 2 });
1415
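    // input1 has shape { 1, 1, 1, 2 }, so it is broadcast along the innermost
    // dimension: each consecutive pair of input0 values is divided by { 1, 2 }
    // respectively, e.g. 4 / 2 == 2 and 8 / 2 == 4 below.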
1416 std::vector<float> output({
1417 1, 2, 3, 4, 5, 6,
1418 7, 8, 9, 10, 11, 12,
1419 13, 14, 15, 16, 17, 18});
1420
David Beck5cd01f32018-09-12 16:00:08 +01001421 return DivisionTestHelper<float>(workloadFactory,
1422 shape0, input0, 1.0f, 0,
1423 shape1, input1, 1.0f, 0,
1424 shape0, output, 1.0f, 0);
1425}
1426
1428LayerTestResult<uint8_t,4> DivisionUint8Test(armnn::IWorkloadFactory& workloadFactory)
1429{
1430 const unsigned int width = 2;
1431 const unsigned int height = 2;
1432 const unsigned int channelCount = 2;
1433 const unsigned int batchSize = 2;
1434
1435 unsigned int shape[] = { batchSize, channelCount, height, width };
1436
1437 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
1438 4, 4, 4, 4, 5, 5, 5, 5 });
1439
1440 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
1441 4, 4, 4, 4, 4, 4, 4, 4 });
1442
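    // The output quantization scale passed to the helper below is 0.25, and the
    // dequantized value is storedValue * scale, so the stored results are four
    // times the real quotients: 2 / 1 == 2 -> 8, 3 / 2 == 1.5 -> 6,
    // 4 / 4 == 1 -> 4 and 5 / 4 == 1.25 -> 5.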
1443 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
1444 4, 4, 4, 4, 5, 5, 5, 5});
1445
1446
1447 return DivisionTestHelper<uint8_t>(workloadFactory,
1448 shape, input0, 1.0f, 0,
1449 shape, input1, 1.0f, 0,
1450 shape, output, 0.25f, 0);
1451}
1452
1453LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
1454{
1455 unsigned int shape0[] = { 1, 2, 2, 2 };
1456 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1457
1458 unsigned int shape1[] = { 1, 1, 1, 1 };
1459 std::vector<uint8_t> input1({ 2 });
1460
1461 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1462
1463 return DivisionTestHelper<uint8_t>(workloadFactory,
1464 shape0, input0, 1.0f, 0,
1465 shape1, input1, 1.0f, 0,
1466 shape0, output, 1.0f, 0);
1467}
1468
1469LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
1470{
1471 unsigned int shape0[] = { 1, 3, 3, 2 };
1472 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
1473 7, 16, 9, 20, 11, 24,
1474 13, 28, 15, 32, 17, 36});
1475
1476 unsigned int shape1[] = { 1, 1, 1, 2 };
1477 std::vector<uint8_t> input1({ 1, 2 });
1478
1479 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
1480 7, 8, 9, 10, 11, 12,
1481 13, 14, 15, 16, 17, 18});
1482
1483 return DivisionTestHelper<uint8_t>(workloadFactory,
1484 shape0, input0, 1.0f, 0,
1485 shape1, input1, 1.0f, 0,
1486 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001487}
1488
1489namespace {
surmeh01bceff2f2018-03-29 16:29:27 +01001490LayerTestResult<float,4> MultiplicationTestHelper(armnn::IWorkloadFactory& workloadFactory,
1491 const unsigned int shape0[4],
1492 const std::vector<float> & values0,
1493 const unsigned int shape1[4],
1494 const std::vector<float> & values1,
1495 const unsigned int outShape[4],
1496 const std::vector<float> & outValues)
telsoa014fcda012018-03-09 14:13:49 +00001497{
surmeh01bceff2f2018-03-29 16:29:27 +01001498 const size_t dimensionCount = 4;
1499 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
1500 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
1501 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
telsoa014fcda012018-03-09 14:13:49 +00001502
surmeh01bceff2f2018-03-29 16:29:27 +01001503 auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
1504 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00001505
1506 LayerTestResult<float,4> ret(outputTensorInfo);
1507
1508 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1509 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1510 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1511
1512 armnn::MultiplicationQueueDescriptor data;
1513 armnn::WorkloadInfo info;
1514 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1515 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1516 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1517
1518 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
1519
1520 inputHandle0->Allocate();
1521 inputHandle1->Allocate();
1522 outputHandle->Allocate();
1523
1524 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1525 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1526
surmeh013537c2c2018-05-18 16:31:43 +01001527 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001528 workload->Execute();
1529
1530 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1531
surmeh01bceff2f2018-03-29 16:29:27 +01001532 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00001533 return ret;
1534}
surmeh01bceff2f2018-03-29 16:29:27 +01001535} // anonymous namespace
1536
1537
1538LayerTestResult<float,4> MultiplicationTest(armnn::IWorkloadFactory& workloadFactory)
1539{
1540 const unsigned int width = 2;
1541 const unsigned int height = 2;
1542 const unsigned int channelCount = 2;
1543 const unsigned int batchSize = 2;
1544
1545 unsigned int shape[] = { batchSize, channelCount, height, width };
1546
1547 std::vector<float> input0({
1548 1, 1, 1, 1, 2, 2, 2, 2,
1549 3, 3, 3, 3, 4, 4, 4, 4 });
1550
1551 std::vector<float> input1({
1552 2, 2, 2, 2, 3, 3, 3, 3,
1553 4, 4, 4, 4, 5, 5, 5, 5 });
1554
1555 std::vector<float> output({
1556 2, 2, 2, 2, 6, 6, 6, 6,
1557 12, 12, 12, 12, 20, 20, 20, 20 });
1558
1559 return MultiplicationTestHelper(workloadFactory,
1560 shape,
1561 input0,
1562 shape,
1563 input1,
1564 shape,
1565 output);
1566}
1567
1568LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
1569{
1570 unsigned int shape0[] = { 1, 2, 2, 2 };
1571 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
1572
1573 unsigned int shape1[] = { 1, 1, 1, 1 };
1574 std::vector<float> input1({ 2 });
1575
1576 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
1577
1578 return MultiplicationTestHelper(workloadFactory,
1579 shape0,
1580 input0,
1581 shape1,
1582 input1,
1583 shape0,
1584 output);
1585}
1586
1587LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
1588{
1589 unsigned int shape0[] = { 1, 3, 3, 2 };
1590 std::vector<float> input0({
1591 1, 2, 3, 4, 5, 6,
1592 7, 8, 9, 10, 11, 12,
1593 13, 14, 15, 16, 17, 18});
1594
1595 unsigned int shape1[] = { 1, 1, 1, 2 };
1596 std::vector<float> input1({ 1, 2 });
1597
1598 std::vector<float> output({
1599 1, 4, 3, 8, 5, 12,
1600 7, 16, 9, 20, 11, 24,
1601 13, 28, 15, 32, 17, 36});
1602
1603 return MultiplicationTestHelper(workloadFactory,
1604 shape0,
1605 input0,
1606 shape1,
1607 input1,
1608 shape0,
1609 output);
1610}
telsoa014fcda012018-03-09 14:13:49 +00001611
1612LayerTestResult<float,4> CompareMultiplicationTest(armnn::IWorkloadFactory& workloadFactory,
1613 armnn::IWorkloadFactory& refWorkloadFactory)
1614{
1615 const unsigned int width = 16;
1616 const unsigned int height = 32;
1617 const unsigned int channelCount = 2;
1618 const unsigned int batchSize = 5;
1619
1620 armnn::TensorInfo inputTensorInfo0;
1621 armnn::TensorInfo inputTensorInfo1;
1622 armnn::TensorInfo outputTensorInfo;
1623
1624 constexpr unsigned int shape[] = { batchSize, channelCount, height, width };
1625
1626 inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1627 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1628 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1629
1630 LayerTestResult<float,4> comparisonResult(outputTensorInfo);
1631
1632 auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
1633 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);
1634
1635 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1636 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1637 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1638
1639 std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
1640 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
1641 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1642
1643 armnn::MultiplicationQueueDescriptor data;
1644 armnn::WorkloadInfo info;
1645 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1646 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1647 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1648
1649 armnn::MultiplicationQueueDescriptor refData = data;
1650 armnn::WorkloadInfo refInfo = info;
1651 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
1652 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
1653 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1654
1655 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
1656 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);
1657
1658 inputHandle0->Allocate();
1659 inputHandle1->Allocate();
1660 outputHandle->Allocate();
1661 inputHandle0Ref->Allocate();
1662 inputHandle1Ref->Allocate();
1663 outputHandleRef->Allocate();
1664
1665 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1666 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1667 CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
1668 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
1669
surmeh013537c2c2018-05-18 16:31:43 +01001670 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001671 workload->Execute();
surmeh013537c2c2018-05-18 16:31:43 +01001672 refWorkloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001673 workloadRef->Execute();
1674
1675 CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
1676 CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
1677
1678 return comparisonResult;
1679}
1680
1681LayerTestResult<float,4> CompareBatchNormTest(armnn::IWorkloadFactory& workloadFactory,
1682 armnn::IWorkloadFactory& refWorkloadFactory)
1683{
1684 const unsigned int width = 2;
1685 const unsigned int height = 3;
1686 const unsigned int channels = 5;
1687 const unsigned int batchSize = 3;
1688
1689 armnn::TensorInfo inputTensorInfo;
1690 armnn::TensorInfo outputTensorInfo;
1691 armnn::TensorInfo tensorInfo;
1692
1693 constexpr unsigned int shape[] = {batchSize, channels, height, width};
1694 constexpr unsigned int tensorShape[] = {channels};
1695
1696 inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1697 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1698 tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);
1699
1700 auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);
1701
1702 auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
1703 auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
1704 auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
1705 auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);
1706
1707 LayerTestResult<float,4> ret(outputTensorInfo);
1708
1709 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
1710 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1711
1712 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
1713 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1714
1715 armnn::BatchNormalizationQueueDescriptor data;
1716 armnn::WorkloadInfo info;
1717 armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
1718 armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
1719 armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
1720 armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
1721
1722 AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
1723 AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
1724 AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
1725 AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
1726
1727 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1728 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1729 data.m_Mean = &meanTensor;
1730 data.m_Variance = &varianceTensor;
1731 data.m_Beta = &betaTensor;
1732 data.m_Gamma = &gammaTensor;
1733 data.m_Parameters.m_Eps = 0.01f;
1734
1735 armnn::BatchNormalizationQueueDescriptor refData = data;
1736 armnn::WorkloadInfo refInfo = info;
1737 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
1738 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1739
1740 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
1741 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);
1742
1743 inputHandle->Allocate();
1744 outputHandle->Allocate();
1745 inputHandleRef->Allocate();
1746 outputHandleRef->Allocate();
1747
1748 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
1749 CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
1750
surmeh013537c2c2018-05-18 16:31:43 +01001751 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001752 workload->Execute();
surmeh013537c2c2018-05-18 16:31:43 +01001753 refWorkloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001754 workloadRef->Execute();
1755
1756 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1757 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
1758
1759 return ret;
1760}
1761
surmeh013537c2c2018-05-18 16:31:43 +01001762template<typename T>
1763void PermuteTensorData(
1764 armnn::IWorkloadFactory& workloadFactory,
1765 const armnn::PermutationVector& mappings,
1766 armnn::TensorInfo & inputTensorInfo,
1767 const T * inputData,
1768 std::vector<T>& outputData)
telsoa014fcda012018-03-09 14:13:49 +00001769{
surmeh013537c2c2018-05-18 16:31:43 +01001770 BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
1771 if (inputData == nullptr)
1772 {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
1776 return;
1777 }
telsoa014fcda012018-03-09 14:13:49 +00001778
surmeh013537c2c2018-05-18 16:31:43 +01001779 armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
1780
1781 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
1782 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1783
1784 armnn::PermuteQueueDescriptor queueDescriptor;
1785 queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
1786 armnn::WorkloadInfo workloadInfo;
1787 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
1788 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
1789
1790 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
1791
1792 inputHandle->Allocate();
1793 outputHandle->Allocate();
1794
1795 CopyDataToITensorHandle(inputHandle.get(), inputData);
1796
1797 workload->Execute();
1798
1799 outputData.resize(outputTensorInfo.GetNumElements());
1800 CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
1801 inputTensorInfo = outputTensorInfo;
1802}
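
// Note that PermuteTensorData takes inputTensorInfo by non-const reference and
// overwrites it with the permuted TensorInfo before returning. A rough usage
// sketch ('factory' and 'data' are placeholder names):
//
//     armnn::TensorInfo info({ 2, 3, 4 }, armnn::DataType::Float32);
//     std::vector<float> permuted;
//     PermuteTensorData<float>(factory, armnn::PermutationVector({ 1, 2, 0 }),
//                              info, data.data(), permuted);
//     // 'info' now describes the permuted shape { 4, 2, 3 }.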
1803
1804armnn::OriginsDescriptor CreateMergerDescriptorForConcatenation(
1805 const std::vector<armnn::TensorInfo> & inputTensorInfos,
1806 unsigned int concatDim)
1807{
telsoa014fcda012018-03-09 14:13:49 +00001808 std::vector<armnn::TensorShape> shapes;
1809 shapes.reserve(inputTensorInfos.size());
1810 for (const armnn::TensorInfo& it: inputTensorInfos)
1811 {
1812 shapes.push_back(it.GetShape());
1813 }
surmeh013537c2c2018-05-18 16:31:43 +01001814
1815 return armnn::CreateMergerDescriptorForConcatenation(shapes.begin(),
1816 shapes.end(),
1817 concatDim);
1818}
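
// As a rough sketch of what the descriptor encodes: concatenating two { 2, 3 }
// tensors along dimension 0 should yield view origins { 0, 0 } and { 2, 0 },
// i.e. each view's origin advances along the concat dimension by the combined
// size of the preceding inputs and is zero everywhere else.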
1819
1820//
// Concatenation is only supported for N and C dimensions for NCHW. In case of
// fewer than 4 dimensions we need to make sure that the concat dimension is at
// least the 3rd slowest iterating one.
1824//
1825
1826bool NeedPermuteForConcat(
1827 const std::vector<armnn::TensorInfo> & inputTensorInfos,
1828 unsigned int concatDim)
1829{
1830 // See note above. Additionally we expect the input shapes to have the
1831 // same number of dimensions.
1832 unsigned int nDimensions = 0;
1833
    // Determine the number of dimensions, as well as sanity-check them
    // against test implementation issues.
surmeh013537c2c2018-05-18 16:31:43 +01001836 for (auto && tensorInfo : inputTensorInfos)
1837 {
1838 if (!nDimensions)
1839 {
1840 nDimensions = tensorInfo.GetShape().GetNumDimensions();
1841 }
1842 else
1843 {
1844 BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
1845 "Input shapes must have the same number of dimensions");
1846 }
1847 }
1848
1849 return (nDimensions-concatDim) < 3;
1850}
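
// For example, with 4d NCHW inputs, concatenation along N gives (4 - 0) == 4
// and along C gives (4 - 1) == 3, so neither needs a permute; concatenation
// along H or W, and every 1d or 2d concatenation, falls below 3 and does.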
1851
1852armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
1853{
1854 unsigned int numDims = inputShape.GetNumDimensions();
1855 if (numDims >= 3)
1856 {
1857 // Nothing to do if the inputShape has at least 3 dimensions.
1858 return inputShape;
1859 }
1860
1861 std::vector<unsigned int> newDims(size_t(3), 1u);
1862 unsigned int expandedBy = 3 - numDims;
1863 for (unsigned int i=0; i<numDims; ++i)
1864 {
1865 newDims[expandedBy+i] = inputShape[i];
1866 }
1867 return armnn::TensorShape(3u, &newDims[0]);
1868}
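
// For example, a 1d shape { 5 } becomes { 1, 1, 5 } and a 2d shape { 4, 6 }
// becomes { 1, 4, 6 }: the original dimensions keep their order and dummy
// 1-sized dimensions are prepended.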
1869
1870void Generate3dPermuteVectorForConcat(
1871 unsigned int numDimensions,
1872 unsigned int & concatDim,
1873 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
1874{
1875 BOOST_ASSERT_MSG(numDimensions <= 3,
1876 "Only dimensions 1,2 and 3 are supported by this helper");
1877
1878 unsigned int expandedBy = 3 - numDimensions;
1879 unsigned int expandedConcatAxis = concatDim + expandedBy;
1880
1881 if (expandedConcatAxis == 2)
1882 {
1883 concatDim = 0;
1884 armnn::PermutationVector forwardPermutation({1, 2, 0});
1885 armnn::PermutationVector reversePermutation({2, 0, 1});
1886 permutations = std::make_pair(forwardPermutation, reversePermutation);
1887 }
1888 else if (expandedConcatAxis == 1)
1889 {
1890 concatDim = 0;
1891 armnn::PermutationVector forwardPermutation({2, 0, 1});
1892 armnn::PermutationVector reversePermutation({1, 2, 0});
1893 permutations = std::make_pair(forwardPermutation, reversePermutation);
1894 }
1895 else
1896 {
1897 BOOST_ASSERT(expandedConcatAxis == 0);
1898 concatDim = 0;
1899 }
1900}
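
// Worked example: for a 2d concatenation along dimension 1, numDimensions == 2,
// so expandedBy == 1 and expandedConcatAxis == 2. The forward permutation
// { 1, 2, 0 } then moves the expanded concat axis to position 0 (assuming
// ArmNN's convention that source dimension i maps to destination mappings[i]),
// concatDim is rewritten to 0, and the reverse permutation { 2, 0, 1 } undoes
// the reshuffle after the merger has run.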
1901
1902//
1903// Permute the input tensors so we can do a supported concatenation.
// Also treat tensors with fewer than 3 dimensions as 3d by adding dummy
// 1-sized dimensions at the front. Finally, this function reports what the
// output shape of the permuted, concatenated tensor is going to be.
1907//
1908template <typename T>
1909void PermuteInputsForConcat(
1910 armnn::IWorkloadFactory& workloadFactory,
1911 std::vector<armnn::TensorInfo> & inputTensorInfos,
1912 std::vector<T *> & inputData,
1913 std::vector<std::vector<T>> & inputDataStorage,
1914 armnn::PermutationVector & permuteVector,
1915 unsigned int & concatDim,
1916 armnn::TensorInfo & outputTensorInfo)
1917{
1918 BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
1919 "Expecting more than one tensor to be concatenated here");
1920
1921 unsigned int numDims = 0;
1922 unsigned int nthInput = 0;
1923 const armnn::PermutationVector identity({0, 1, 2});
1924
1925 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
1926 std::make_pair(identity, identity);
1927
1928 inputDataStorage.resize(inputData.size());
1929
1930 for (auto && tensorInfo : inputTensorInfos)
1931 {
1932 if (numDims == 0)
1933 {
1934 numDims = tensorInfo.GetShape().GetNumDimensions();
1935 Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);
            // Store the reverse permutation.
surmeh013537c2c2018-05-18 16:31:43 +01001937 permuteVector = permutations.second;
1938 BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
1939 "Test logic error, we don't need permutation, so we shouldn't arrive here");
1940 }
1941 else
1942 {
1943 BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
1944 "All inputs must have the same number of dimensions");
1945 }
1946
1947 armnn::TensorInfo newTensorInfo = tensorInfo;
1948 newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));
1949
1950 PermuteTensorData<T>(workloadFactory,
1951 permutations.first,
1952 newTensorInfo,
1953 inputData[nthInput],
1954 inputDataStorage[nthInput]);
1955
1956 inputData[nthInput] = inputDataStorage[nthInput].data();
1957 inputTensorInfos[nthInput] = newTensorInfo;
1958
1959 ++nthInput;
1960 }
1961
1962 outputTensorInfo.SetShape(
1963 armnnUtils::Permuted(
1964 ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
1965 permutations.first));
1966}
1967
1968
1969//
// This is the counterpart of PermuteInputsForConcat(...): it permutes back
// the output of the concatenation so we can check it against an expected
// output.
1973//
1974template <typename T>
1975void PermuteOutputForConcat(
1976 armnn::IWorkloadFactory& workloadFactory,
1977 const armnn::TensorInfo & tensorInfo,
1978 const armnn::PermutationVector & permuteVector,
1979 std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
1980 T * data)
1981{
1982 BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
1983 if (data == nullptr)
1984 {
1985 // Nullptr is an error in the test. By returning without doing the permutation
1986 // I expect the caller to fail the test. It still makes sense to report this as
1987 // an assert for Debug builds.
1988 return;
1989 }
1990
1991 armnn::TensorInfo resultTensorInfo = tensorInfo;
1992 std::vector<T> inputData(tensorInfo.GetNumElements());
1993 std::vector<T> outputData;
1994
1995 CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
1996
1997 PermuteTensorData<T>(workloadFactory,
1998 permuteVector,
1999 resultTensorInfo,
2000 &inputData[0],
2001 outputData);
2002
2003 ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
2004}
2005
2006template <typename T>
2007void Concatenate(armnn::IWorkloadFactory& workloadFactory,
2008 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
2009 std::initializer_list<T *> inputsOrig,
2010 const armnn::TensorInfo& outputTensorInfoOrig,
2011 T * output,
2012 unsigned int concatDim)
2013{
2014 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
2015 if (output == nullptr)
2016 {
2017 // Nullptr is an error in the test. By returning without doing the permutation
2018 // I expect the caller to fail the test. It still makes sense to report this as
2019 // an assert for Debug builds.
2020 return;
2021 }
2022
2023 armnn::MergerQueueDescriptor queueDescriptor;
2024
telsoa01c577f2c2018-08-31 09:22:23 +01002025 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01002026 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
2027 std::vector<T *> inputs = inputsOrig;
2028 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
2029
2030 armnn::PermutationVector permuteVector{0, 1, 2};
2031
telsoa01c577f2c2018-08-31 09:22:23 +01002032 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01002033 std::vector<std::vector<T>> tmpInputDataStorage;
2034
2035 const size_t inputCount = inputTensorInfos.size();
2036
2037 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
2038
2039 if (needPermuteForConcat)
2040 {
2041 //
2042 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01002043 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01002044 //
2045 PermuteInputsForConcat<T>(workloadFactory,
2046 inputTensorInfos,
2047 inputs,
2048 tmpInputDataStorage,
2049 permuteVector,
2050 concatDim,
2051 outputTensorInfo);
2052 }
2053
2054 armnn::OriginsDescriptor viewsDescriptor = CreateMergerDescriptorForConcatenation(inputTensorInfos, concatDim);
telsoa014fcda012018-03-09 14:13:49 +00002055
2056 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
2057 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
2058 {
2059 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
2060 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
2061 }
2062
telsoa014fcda012018-03-09 14:13:49 +00002063 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2064
2065 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
2066 inputHandles.reserve(inputCount);
2067
2068 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2069 for (unsigned int i = 0; i < inputCount; ++i)
2070 {
surmeh013537c2c2018-05-18 16:31:43 +01002071 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
telsoa014fcda012018-03-09 14:13:49 +00002072
2073 std::unique_ptr<armnn::ITensorHandle> inputHandle = subTensorsSupported ?
2074 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo.GetShape(),
2075 queueDescriptor.m_ViewOrigins[i].m_Origin.data())
2076 : workloadFactory.CreateTensorHandle(inputTensorInfo);
2077
2078 inputHandles.emplace_back(std::move(inputHandle));
2079 }
2080
2081 armnn::WorkloadInfo workloadInfo;
2082
2083 for (unsigned int i = 0; i < inputCount; ++i)
2084 {
surmeh013537c2c2018-05-18 16:31:43 +01002085 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00002086 }
2087
2088 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
2089
2090 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(queueDescriptor, workloadInfo);
2091
2092 for (auto& inputHandle : inputHandles)
2093 {
2094 inputHandle->Allocate();
2095 }
2096
2097 outputHandle->Allocate();
2098
2099 unsigned int nextInputId = 0;
2100 for (auto& inputHandle : inputHandles)
2101 {
surmeh013537c2c2018-05-18 16:31:43 +01002102 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
2103 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00002104 }
2105
surmeh013537c2c2018-05-18 16:31:43 +01002106 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00002107 workload->Execute();
2108
surmeh013537c2c2018-05-18 16:31:43 +01002109 if (needPermuteForConcat)
2110 {
2111 PermuteOutputForConcat<T>(workloadFactory,
2112 outputTensorInfo,
2113 permuteVector,
2114 std::move(outputHandle),
2115 output);
2116 }
2117 else
2118 {
2119 CopyDataFromITensorHandle(output, outputHandle.get());
2120 }
telsoa014fcda012018-03-09 14:13:49 +00002121}
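
// To recap the flow above: Concatenate() permutes the inputs first if the
// requested axis is unsupported, builds an OriginsDescriptor plus the matching
// view origins, writes the inputs through sub-tensor handles of the output when
// the factory supports them (separate tensor handles otherwise), executes the
// merger workload, and finally either permutes the result back or copies it
// straight into 'output'.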
2122
2123template <typename T>
2124LayerTestResult<T, 1> Concatenation1dTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
2125{
2126 armnn::TensorInfo inputTensorInfo({ 3 }, armnn::GetDataType<T>());
2127
2128 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
2129 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
2130 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
2131
2132 armnn::TensorInfo outputTensorInfo({ 9 }, armnn::GetDataType<T>());
2133
2134 LayerTestResult<T, 1> result(outputTensorInfo);
2135
2136 std::vector<T> output;
2137 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002138 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002139 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2140 { input0.data(), input1.data(), input2.data() },
2141 outputTensorInfo,
2142 output.data(),
2143 0);
2144
2145 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
2146 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2147 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
2148 }));
2149
2150 return result;
2151}
2152
2153LayerTestResult<float, 1> Concatenation1dTest(armnn::IWorkloadFactory& workloadFactory)
2154{
2155 return Concatenation1dTestImpl<float>(workloadFactory, 0.0f, 0);
2156}
2157
2158template <typename T>
2159LayerTestResult<T, 2> Concatenation2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
2160 const armnn::TensorInfo& outputTensorInfo,
2161 unsigned int dimension,
2162 const float qScale,
2163 const int32_t qOffset)
2164{
2165 armnn::TensorInfo inputTensorInfo({ 2, 3 }, armnn::GetDataType<T>());
2166
2167 auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2168 // Batch 0
2169 1.0f, 2.0f, 3.0f,
2170
2171 // Batch 1
2172 10.0f, 11.0f, 12.0f,
2173 }));
2174
2175 auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2176 // Batch 0
2177 4.0f, 5.0f, 6.0f,
2178
2179 // Batch 1
2180 13.0f, 14.0f, 15.0f,
2181 }));
2182
2183 auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2184 // Batch 0
2185 7.0f, 8.0f, 9.0f,
2186
2187 // Batch 1
2188 16.0f, 17.0f, 18.0f,
2189 }));
2190
2191 LayerTestResult<T, 2> result(outputTensorInfo);
2192
2193 std::vector<T> output;
2194 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002195 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002196 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2197 { input0.data(), input1.data(), input2.data() },
2198 outputTensorInfo,
2199 output.data(),
2200 dimension);
2201
2202 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
2203 return result;
2204}
2205
2206template <typename T>
2207LayerTestResult<T, 2> Concatenation2dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory,
2208 float qScale, int32_t qOffset)
2209{
2210 armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
2211
2212 LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 0, qScale, qOffset);
2213 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2214 // Batch 0
2215 1.0f, 2.0f, 3.0f,
2216
2217 // Batch 1
2218 10.0f, 11.0f, 12.0f,
2219
2220 // Batch 2
2221 4.0f, 5.0f, 6.0f,
2222
2223 // Batch 3
2224 13.0f, 14.0f, 15.0f,
2225
2226 // Batch 4
2227 7.0f, 8.0f, 9.0f,
2228
2229 // Batch 5
2230 16.0f, 17.0f, 18.0f,
2231 }));
2232
2233 return result;
2234}
2235
2236LayerTestResult<float, 2> Concatenation2dDim0Test(armnn::IWorkloadFactory& workloadFactory)
2237{
2238 return Concatenation2dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
2239}
2240
2241template <typename T>
2242LayerTestResult<T, 2> Concatenation2dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
2243 float qScale, int32_t qOffset)
2244{
2245 armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
2246
2247 LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
2248 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2249 // Batch 0
2250 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
2251
2252 // Batch 1
2253 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
2254 }));
2255
2256 return result;
2257}
2258
2259LayerTestResult<float, 2> Concatenation2dDim1Test(armnn::IWorkloadFactory& workloadFactory)
2260{
2261 return Concatenation2dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
2262}
2263
2264template <typename T>
2265LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2266 int32_t qOffset)
2267{
2268 armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
2269 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2270 // Batch 0
2271 1.0f, 2.0f, 3.0f,
2272
2273 // Batch 1
2274 10.0f, 11.0f, 12.0f,
2275 }));
2276
2277 armnn::TensorInfo input1TensorInfo({ 3, 3 }, armnn::GetDataType<T>());
2278 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2279 // Batch 0
2280 4.0f, 5.0f, 6.0f,
2281
2282 // Batch 1
2283 13.0f, 14.0f, 15.0f,
2284
2285 // Batch 0
2286 7.0f, 8.0f, 9.0f,
2287 }));
2288
2289 armnn::TensorInfo input2TensorInfo({ 1, 3 }, armnn::GetDataType<T>());
2290 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
2292 16.0f, 17.0f, 18.0f,
2293 }));
2294
2295 armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
2296 LayerTestResult<T, 2> result(outputTensorInfo);
2297
2298 std::vector<T> output;
2299 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002300 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002301 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2302 { input0.data(), input1.data(), input2.data() },
2303 outputTensorInfo,
2304 output.data(),
2305 0);
2306
2307 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
2308 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2309 // Batch 0
2310 1.0f, 2.0f, 3.0f,
2311
2312 // Batch 1
2313 10.0f, 11.0f, 12.0f,
2314
2315 // Batch 2
2316 4.0f, 5.0f, 6.0f,
2317
2318 // Batch 3
2319 13.0f, 14.0f, 15.0f,
2320
2321 // Batch 4
2322 7.0f, 8.0f, 9.0f,
2323
2324 // Batch 5
2325 16.0f, 17.0f, 18.0f,
2326 }));
2327
2328 return result;
2329}
2330
2331LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2332{
2333 return Concatenation2dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2334}
2335
2336template <typename T>
2337LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2338 int32_t qOffset)
2339{
2340 armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
2341 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2342 // Batch 0
2343 1.0f, 2.0f, 3.0f,
2344
2345 // Batch 1
2346 10.0f, 11.0f, 12.0f,
2347 }));
2348
2349 armnn::TensorInfo input1TensorInfo({ 2, 5 }, armnn::GetDataType<T>());
2350 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2351 // Batch 0
2352 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
2353
2354 // Batch 1
2355 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
2356 }));
2357
2358 armnn::TensorInfo input2TensorInfo({ 2, 1 }, armnn::GetDataType<T>());
2359 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2360 // Batch 0
2361 9.0f,
2362
2363 // Batch 1
2364 18.0f
2365 }));
2366
2367 armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
2368 LayerTestResult<T, 2> result(outputTensorInfo);
2369
2370 std::vector<T> output;
2371 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002372 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002373 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2374 { input0.data(), input1.data(), input2.data() },
2375 outputTensorInfo,
2376 output.data(),
2377 1);
2378
2379 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
2380 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2381 // Batch 0
2382 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
2383
2384 // Batch 1
2385 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
2386 }));
2387
2388 return result;
2389}
2390
2391LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2392{
2393 return Concatenation2dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2394}
2395
2396template <typename T>
2397LayerTestResult<T, 3> Concatenation3dTestImpl(armnn::IWorkloadFactory& workloadFactory,
2398 const armnn::TensorInfo& outputTensorInfo,
2399 unsigned int dimension,
2400 float qScale,
2401 int32_t qOffset)
2402{
2403 armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2404
2405 auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2406 // Batch 0, Channel 0
2407 1.0f, 2.0f,
2408
2409 // Batch 0, Channel 1
2410 3.0f, 4.0f,
2411
2412 // Batch 0, Channel 2
2413 5.0f, 6.0f,
2414
2415 // Batch 1, Channel 0
2416 19.0f, 20.0f,
2417
2418 // Batch 1, Channel 1
2419 21.0f, 22.0f,
2420
2421 // Batch 1, Channel 2
2422 23.0f, 24.0f
2423 }));
2424
2425 auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2426 // Batch 0, Channel 0
2427 7.0f, 8.0f,
2428
2429 // Batch 0, Channel 1
2430 9.0f, 10.0f,
2431
2432 // Batch 0, Channel 2
2433 11.0f, 12.0f,
2434
2435 // Batch 1, Channel 0
2436 25.0f, 26.0f,
2437
2438 // Batch 1, Channel 1
2439 27.0f, 28.0f,
2440
2441 // Batch 1, Channel 2
2442 29.0f, 30.0f
2443 }));
2444
2445 auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2446 // Batch 0, Channel 0
2447 13.0f, 14.0f,
2448
2449 // Batch 0, Channel 1
2450 15.0f, 16.0f,
2451
2452 // Batch 0, Channel 2
2453 17.0f, 18.0f,
2454
2455 // Batch 1, Channel 0
2456 31.0f, 32.0f,
2457
2458 // Batch 1, Channel 1
2459 33.0f, 34.0f,
2460
2461 // Batch 1, Channel 2
2462 35.0f, 36.0f
2463 }));
2464
2465 LayerTestResult<T, 3> result(outputTensorInfo);
2466
2467 std::vector<T> output;
2468 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002469 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002470 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2471 { input0.data(), input1.data(), input2.data() },
2472 outputTensorInfo,
2473 output.data(),
2474 dimension);
2475
2476 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
2477 return result;
2478}
2479
2480template <typename T>
2481LayerTestResult<T, 3> Concatenation3dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2482 int32_t qOffset)
2483{
2484 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
2485
2486 LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 0,
2487 qScale, qOffset);
2488 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2489 // Batch 0, Channel 0
2490 1.0f, 2.0f,
2491
2492 // Batch 0, Channel 1
2493 3.0f, 4.0f,
2494
2495 // Batch 0, Channel 2
2496 5.0f, 6.0f,
2497
2498 // Batch 1, Channel 0
2499 19.0f, 20.0f,
2500
2501 // Batch 1, Channel 1
2502 21.0f, 22.0f,
2503
2504 // Batch 1, Channel 2
2505 23.0f, 24.0f,
2506
2507 // Batch 2, Channel 0
2508 7.0f, 8.0f,
2509
2510 // Batch 2, Channel 1
2511 9.0f, 10.0f,
2512
2513 // Batch 2, Channel 2
2514 11.0f, 12.0f,
2515
2516 // Batch 3, Channel 0
2517 25.0f, 26.0f,
2518
2519 // Batch 3, Channel 1
2520 27.0f, 28.0f,
2521
2522 // Batch 3, Channel 2
2523 29.0f, 30.0f,
2524
2525 // Batch 4, Channel 0
2526 13.0f, 14.0f,
2527
2528 // Batch 4, Channel 1
2529 15.0f, 16.0f,
2530
2531 // Batch 4, Channel 2
2532 17.0f, 18.0f,
2533
2534 // Batch 5, Channel 0
2535 31.0f, 32.0f,
2536
2537 // Batch 5, Channel 1
2538 33.0f, 34.0f,
2539
2540 // Batch 5, Channel 2
2541 35.0f, 36.0f
2542 }));
2543 return result;
2544}
2545
2546LayerTestResult<float, 3> Concatenation3dDim0Test(armnn::IWorkloadFactory& workloadFactory)
2547{
2548 return Concatenation3dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
2549}
2550
2551template <typename T>
2552LayerTestResult<T, 3> Concatenation3dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
2553 float qScale, int32_t qOffset)
2554{
2555 armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, armnn::GetDataType<T>());
2556
2557 LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
2558 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2559 // Batch 0, Channel 0
2560 1.0f, 2.0f,
2561
2562 // Batch 0, Channel 1
2563 3.0f, 4.0f,
2564
2565 // Batch 0, Channel 2
2566 5.0f, 6.0f,
2567
2568 // Batch 0, Channel 3
2569 7.0f, 8.0f,
2570
2571 // Batch 0, Channel 4
2572 9.0f, 10.0f,
2573
2574 // Batch 0, Channel 5
2575 11.0f, 12.0f,
2576
2577 // Batch 0, Channel 6
2578 13.0f, 14.0f,
2579
2580 // Batch 0, Channel 7
2581 15.0f, 16.0f,
2582
2583 // Batch 0, Channel 8
2584 17.0f, 18.0f,
2585
2586 // Batch 1, Channel 0
2587 19.0f, 20.0f,
2588
2589 // Batch 1, Channel 1
2590 21.0f, 22.0f,
2591
2592 // Batch 1, Channel 2
2593 23.0f, 24.0f,
2594
2595 // Batch 1, Channel 3
2596 25.0f, 26.0f,
2597
2598 // Batch 1, Channel 4
2599 27.0f, 28.0f,
2600
2601 // Batch 1, Channel 5
2602 29.0f, 30.0f,
2603
2604 // Batch 1, Channel 6
2605 31.0f, 32.0f,
2606
2607 // Batch 1, Channel 7
2608 33.0f, 34.0f,
2609
2610 // Batch 1, Channel 8
2611 35.0f, 36.0f
2612 }));
2613
2614 return result;
2615}
2616
2617LayerTestResult<float, 3> Concatenation3dDim1Test(armnn::IWorkloadFactory& workloadFactory)
2618{
2619 return Concatenation3dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
2620}
2621
2622template <typename T>
2623LayerTestResult<T, 3> Concatenation3dDim2TestImpl(armnn::IWorkloadFactory& workloadFactory,
2624 float qScale, int32_t qOffset)
2625{
2626 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
2627
2628 LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 2, qScale, qOffset);
2629 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2630 // Batch 0, Channel 0
2631 1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
2632
2633 // Batch 0, Channel 1
2634 3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
2635
2636 // Batch 0, Channel 2
2637 5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
2638
2639 // Batch 1, Channel 0
2640 19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
2641
2642 // Batch 1, Channel 1
2643 21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
2644
2645 // Batch 1, Channel 2
2646 23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
2647 }));
2648
2649 return result;
2650}
2651
2652LayerTestResult<float, 3> Concatenation3dDim2Test(armnn::IWorkloadFactory& workloadFactory)
2653{
2654 return Concatenation3dDim2TestImpl<float>(workloadFactory, 0.0f, 0);
2655}
2656
2657template <typename T>
2658LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2659 int32_t qOffset)
2660{
2661 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2662 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2663 // Batch 0, Channel 0
2664 1.0f, 2.0f,
2665
2666 // Batch 0, Channel 1
2667 3.0f, 4.0f,
2668
2669 // Batch 0, Channel 2
2670 5.0f, 6.0f,
2671
2672 // Batch 1, Channel 0
2673 19.0f, 20.0f,
2674
2675 // Batch 1, Channel 1
2676 21.0f, 22.0f,
2677
2678 // Batch 1, Channel 2
2679 23.0f, 24.0f
2680 }));
2681
2682 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, armnn::GetDataType<T>());
2683 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2684 // Batch 0, Channel 0
2685 7.0f, 8.0f,
2686
2687 // Batch 0, Channel 1
2688 9.0f, 10.0f,
2689
2690 // Batch 0, Channel 2
2691 11.0f, 12.0f,
2692 }));
2693
2694 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, armnn::GetDataType<T>());
2695 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2696 // Batch 0, Channel 0
2697 25.0f, 26.0f,
2698
2699 // Batch 0, Channel 1
2700 27.0f, 28.0f,
2701
2702 // Batch 0, Channel 2
2703 29.0f, 30.0f,
2704
2705 // Batch 1, Channel 0
2706 13.0f, 14.0f,
2707
2708 // Batch 1, Channel 1
2709 15.0f, 16.0f,
2710
2711 // Batch 1, Channel 2
2712 17.0f, 18.0f,
2713
2714 // Batch 2, Channel 0
2715 31.0f, 32.0f,
2716
2717 // Batch 2, Channel 1
2718 33.0f, 34.0f,
2719
2720 // Batch 2, Channel 2
2721 35.0f, 36.0f
2722 }));
2723
2724 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
2725 LayerTestResult<T, 3> result(outputTensorInfo);
2726
2727 std::vector<T> output;
2728 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002729 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002730 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2731 { input0.data(), input1.data(), input2.data() },
2732 outputTensorInfo,
2733 output.data(),
2734 0);
2735
2736 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
2737 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2738 // Batch 0, Channel 0
2739 1.0f, 2.0f,
2740
2741 // Batch 0, Channel 1
2742 3.0f, 4.0f,
2743
2744 // Batch 0, Channel 2
2745 5.0f, 6.0f,
2746
2747 // Batch 1, Channel 0
2748 19.0f, 20.0f,
2749
2750 // Batch 1, Channel 1
2751 21.0f, 22.0f,
2752
2753 // Batch 1, Channel 2
2754 23.0f, 24.0f,
2755
2756 // Batch 2, Channel 0
2757 7.0f, 8.0f,
2758
2759 // Batch 2, Channel 1
2760 9.0f, 10.0f,
2761
2762 // Batch 2, Channel 2
2763 11.0f, 12.0f,
2764
2765 // Batch 3, Channel 0
2766 25.0f, 26.0f,
2767
2768 // Batch 3, Channel 1
2769 27.0f, 28.0f,
2770
2771 // Batch 3, Channel 2
2772 29.0f, 30.0f,
2773
2774 // Batch 4, Channel 0
2775 13.0f, 14.0f,
2776
2777 // Batch 4, Channel 1
2778 15.0f, 16.0f,
2779
2780 // Batch 4, Channel 2
2781 17.0f, 18.0f,
2782
2783 // Batch 5, Channel 0
2784 31.0f, 32.0f,
2785
2786 // Batch 5, Channel 1
2787 33.0f, 34.0f,
2788
2789 // Batch 5, Channel 2
2790 35.0f, 36.0f
2791 }));
2792
2793 return result;
2794}
2795
2796LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2797{
2798 return Concatenation3dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2799}
2800
2801template <typename T>
2802LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2803 int32_t qOffset)
2804{
2805 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2806 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2807 // Batch 0, Channel 0
2808 1.0f, 2.0f,
2809
2810 // Batch 0, Channel 1
2811 3.0f, 4.0f,
2812
2813 // Batch 0, Channel 2
2814 5.0f, 6.0f,
2815
2816 // Batch 1, Channel 0
2817 19.0f, 20.0f,
2818
2819 // Batch 1, Channel 1
2820 21.0f, 22.0f,
2821
2822 // Batch 1, Channel 2
2823 23.0f, 24.0f
2824 }));
2825
2826 armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, armnn::GetDataType<T>());
2827 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2828 // Batch 0, Channel 0
2829 7.0f, 8.0f,
2830
2831 // Batch 0, Channel 1
2832 9.0f, 10.0f,
2833
2834 // Batch 0, Channel 2
2835 11.0f, 12.0f,
2836
2837 // Batch 0, Channel 3
2838 25.0f, 26.0f,
2839
2840 // Batch 1, Channel 0
2841 27.0f, 28.0f,
2842
2843 // Batch 1, Channel 1
2844 29.0f, 30.0f,
2845
2846 // Batch 1, Channel 2
2847 13.0f, 14.0f,
2848
2849 // Batch 1, Channel 3
2850 15.0f, 16.0f,
2851 }));
2852
2853 armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, armnn::GetDataType<T>());
2854 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2855 // Batch 0, Channel 0
2856 17.0f, 18.0f,
2857
2858 // Batch 1, Channel 0
2859 31.0f, 32.0f,
2860 }));
2861
2862 armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, armnn::GetDataType<T>());
2863 LayerTestResult<T, 3> result(outputTensorInfo);
2864
2865 std::vector<T> output;
2866 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002867 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002868 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2869 { input0.data(), input1.data(), input2.data() },
2870 outputTensorInfo,
2871 output.data(),
2872 1);
2873
2874 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
2875 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2876 // Batch 0, Channel 0
2877 1.0f, 2.0f,
2878
2879 // Batch 0, Channel 1
2880 3.0f, 4.0f,
2881
2882 // Batch 0, Channel 2
2883 5.0f, 6.0f,
2884
2885 // Batch 0, Channel 3
2886 7.0f, 8.0f,
2887
2888 // Batch 0, Channel 4
2889 9.0f, 10.0f,
2890
2891 // Batch 0, Channel 5
2892 11.0f, 12.0f,
2893
2894 // Batch 0, Channel 6
2895 25.0f, 26.0f,
2896
2897 // Batch 0, Channel 7
2898 17.0f, 18.0f,
2899
2900 // Batch 1, Channel 0
2901 19.0f, 20.0f,
2902
2903 // Batch 1, Channel 1
2904 21.0f, 22.0f,
2905
2906 // Batch 1, Channel 2
2907 23.0f, 24.0f,
2908
2909 // Batch 1, Channel 3
2910 27.0f, 28.0f,
2911
2912 // Batch 1, Channel 4
2913 29.0f, 30.0f,
2914
2915 // Batch 1, Channel 5
2916 13.0f, 14.0f,
2917
2918 // Batch 1, Channel 6
2919 15.0f, 16.0f,
2920
2921 // Batch 1, Channel 7
2922 31.0f, 32.0f,
2923 }));
2924
2925 return result;
2926}
2927
2928LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2929{
2930 return Concatenation3dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2931}
2932
2933template <typename T>
2934LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2935 int32_t qOffset)
2936{
2937 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2938 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2939 // Batch 0, Channel 0
2940 1.0f, 2.0f,
2941
2942 // Batch 0, Channel 1
2943 3.0f, 4.0f,
2944
2945 // Batch 0, Channel 2
2946 5.0f, 6.0f,
2947
2948 // Batch 1, Channel 0
2949 19.0f, 20.0f,
2950
2951 // Batch 1, Channel 1
2952 21.0f, 22.0f,
2953
2954 // Batch 1, Channel 2
2955 23.0f, 24.0f
2956 }));
2957
2958 armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, armnn::GetDataType<T>());
2959 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2960 // Batch 0, Channel 0
2961 7.0f,
2962
2963 // Batch 0, Channel 1
2964 9.0f,
2965
2966 // Batch 0, Channel 2
2967 11.0f,
2968
2969 // Batch 1, Channel 0
2970 25.0f,
2971
2972 // Batch 1, Channel 1
2973 27.0f,
2974
2975 // Batch 1, Channel 2
2976 29.0f
2977 }));
2978
2979 armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, armnn::GetDataType<T>());
2980 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2981 // Batch 0, Channel 0
2982 13.0f, 14.0f, 50.0f,
2983
2984 // Batch 0, Channel 1
2985 15.0f, 16.0f, 51.0f,
2986
2987 // Batch 0, Channel 2
2988 17.0f, 18.0f, 52.0f,
2989
2990 // Batch 1, Channel 0
2991 31.0f, 32.0f, 53.0f,
2992
2993 // Batch 1, Channel 1
2994 33.0f, 34.0f, 54.0f,
2995
2996 // Batch 1, Channel 2
2997 35.0f, 36.0f, 55.0f,
2998 }));
2999
3000 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
3001 LayerTestResult<T, 3> result(outputTensorInfo);
3002
3003 std::vector<T> output;
3004 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01003005 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00003006 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
3007 { input0.data(), input1.data(), input2.data() },
3008 outputTensorInfo,
3009 output.data(),
3010 2);
3011
3012 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
3013 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3014 // Batch 0, Channel 0
3015 1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
3016
3017 // Batch 0, Channel 1
3018 3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
3019
3020 // Batch 0, Channel 2
3021 5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
3022
3023 // Batch 1, Channel 0
3024 19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
3025
3026 // Batch 1, Channel 1
3027 21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
3028
3029 // Batch 1, Channel 2
3030 23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
3031 }));
3032
3033 return result;
3034}
3035
3036LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
3037{
3038 return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
3039}
3040
James Conroy074f3712018-10-03 09:32:03 +01003041LayerTestResult<float, 4> ResizeBilinearNopTestImpl(armnn::IWorkloadFactory& workloadFactory,
3042 const armnn::TensorShape& inputOutputTensorShape,
3043 armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00003044{
James Conroy074f3712018-10-03 09:32:03 +01003045 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
3046 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00003047
3048 auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
3049 1.0f, 2.0f, 3.0f, 4.0f,
3050 2.0f, 3.0f, 4.0f, 5.0f,
3051 3.0f, 4.0f, 5.0f, 6.0f,
3052 4.0f, 5.0f, 6.0f, 7.0f
3053 }));
3054
3055 LayerTestResult<float, 4> result(outputTensorInfo);
3056 result.outputExpected = input;
3057
3058 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3059 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3060
3061 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003062 descriptor.m_Parameters.m_DataLayout = dataLayout;
3063 armnn::WorkloadInfo info;
3064 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3065 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3066
3067 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3068
3069 inputHandle->Allocate();
3070 outputHandle->Allocate();
3071 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3072
3073 workloadFactory.Finalize();
3074 workload->Execute();
3075
3076 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3077 return result;
3078}
3079
3080LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory)
3081{
3082 // BatchSize = 1, Channels = 1, Height = 4, Width = 4
3083 const armnn::TensorShape inputOutputShape{ 1, 1, 4, 4 };
3084
3085 return ResizeBilinearNopTestImpl(workloadFactory, inputOutputShape, armnn::DataLayout::NCHW);
3086}
3087
3088LayerTestResult<float, 4> ResizeBilinearNopNhwcTest(armnn::IWorkloadFactory& workloadFactory)
3089{
3090 // BatchSize = 1, Height = 4, Width = 4, Channels = 1
3091 const armnn::TensorShape inputOutputShape{ 1, 4, 4, 1 };
3092
3093 return ResizeBilinearNopTestImpl(workloadFactory, inputOutputShape, armnn::DataLayout::NHWC);
3094}
3095
3096LayerTestResult<float, 4> SimpleResizeBilinearTestImpl(armnn::IWorkloadFactory& workloadFactory,
3097 const armnn::TensorShape& inputTensorShape,
3098 const armnn::TensorShape& outputTensorShape,
3099 armnn::DataLayout dataLayout)
3100{
3101 const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
3102 const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
3103
3104 auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
3105 1.0f, 255.0f,
3106 200.0f, 250.0f
3107 }));
3108
3109 // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
3110 // then figures out the interpolants and weights. Note this is different to projecting the centre of the
3111 // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
3112 // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
3113 // the centre).
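    // Here the 2x2 -> 1x1 resize has a scale factor of 2 in each dimension, so output texel (0,0)
    // projects exactly onto input texel (0,0) and no blending with its neighbours occurs.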
3114 LayerTestResult<float, 4> result(outputTensorInfo);
3115 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
3116 1.0f
3117 }));
3118
3119 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3120 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3121
3122 armnn::ResizeBilinearQueueDescriptor descriptor;
3123 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003124 armnn::WorkloadInfo info;
3125 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3126 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3127
3128 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3129
3130 inputHandle->Allocate();
3131 outputHandle->Allocate();
3132 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3133
surmeh013537c2c2018-05-18 16:31:43 +01003134 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00003135 workload->Execute();
3136
3137 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3138 return result;
3139}
3140
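// A minimal sketch (illustrative only, not used by the tests) of the corner-projection mapping the
// comment above describes: an output texel's source coordinate is its index scaled by
// inputSize / outputSize, with no half-texel offset. The helper name is hypothetical.
inline float ResizeBilinearSourceCoordSketch(unsigned int outputIndex,
                                             unsigned int inputSize,
                                             unsigned int outputSize)
{
    const float scale = static_cast<float>(inputSize) / static_cast<float>(outputSize);
    return static_cast<float>(outputIndex) * scale; // E.g. 2x2 -> 1x1: output (0,0) samples input (0,0).
}
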
3141LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory)
3142{
James Conroy074f3712018-10-03 09:32:03 +01003143 // inputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 2
3144 const armnn::TensorShape inputShape{ 1, 1, 2, 2 };
telsoa014fcda012018-03-09 14:13:49 +00003145
James Conroy074f3712018-10-03 09:32:03 +01003146 // outputShape: BatchSize = 1, Channels = 1, Height = 1, Width = 1
3147 const armnn::TensorShape outputShape{ 1, 1, 1, 1 };
telsoa014fcda012018-03-09 14:13:49 +00003148
James Conroy074f3712018-10-03 09:32:03 +01003149 return SimpleResizeBilinearTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
3150}
3151
3152LayerTestResult<float, 4> SimpleResizeBilinearNhwcTest(armnn::IWorkloadFactory& workloadFactory)
3153{
3154 // inputShape: BatchSize = 1, Height = 2, Width = 2, Channels = 1
3155 const armnn::TensorShape inputShape{ 1, 2, 2, 1 };
3156
3157 // outputShape: BatchSize = 1, Height = 1, Width = 1, Channels = 1
3158 const armnn::TensorShape outputShape{ 1, 1, 1, 1 };
3159
3160 return SimpleResizeBilinearTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
3161}
3162
3163LayerTestResult<float, 4> ResizeBilinearSqMinTestImpl(armnn::IWorkloadFactory& workloadFactory,
3164 const armnn::TensorShape& inputTensorShape,
3165 const armnn::TensorShape& outputTensorShape,
3166 armnn::DataLayout dataLayout)
3167{
3168 const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
3169 const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00003170
3171 auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
James Conroy074f3712018-10-03 09:32:03 +01003172 1.0f, 2.0f, 3.0f, 4.0f,
3173 2.0f, 3.0f, 4.0f, 5.0f,
3174 3.0f, 4.0f, 5.0f, 6.0f,
3175 4.0f, 5.0f, 6.0f, 7.0f
telsoa014fcda012018-03-09 14:13:49 +00003176 }));
3177
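    // Halving the 4x4 input to 2x2 gives integer scale factors, so every output texel projects
    // exactly onto an input texel: output (i,j) samples input (2i,2j), picking out 1, 3, 3 and 5.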
telsoa014fcda012018-03-09 14:13:49 +00003178 LayerTestResult<float, 4> result(outputTensorInfo);
3179 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
James Conroy074f3712018-10-03 09:32:03 +01003180 1.0f, 3.0f,
3181 3.0f, 5.0f
telsoa014fcda012018-03-09 14:13:49 +00003182 }));
3183
3184 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3185 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3186
3187 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003188 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003189 armnn::WorkloadInfo info;
3190 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3191 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3192
3193 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3194
3195 inputHandle->Allocate();
3196 outputHandle->Allocate();
3197 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3198
surmeh013537c2c2018-05-18 16:31:43 +01003199 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00003200 workload->Execute();
3201
3202 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3203 return result;
3204}
3205
3206LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory)
3207{
James Conroy074f3712018-10-03 09:32:03 +01003208 // inputShape: BatchSize = 1, Channels = 1, Height = 4, Width = 4
3209 const armnn::TensorShape inputShape{ 1, 1, 4, 4 };
telsoa014fcda012018-03-09 14:13:49 +00003210
James Conroy074f3712018-10-03 09:32:03 +01003211 // outputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 2
3212 const armnn::TensorShape outputShape{ 1, 1, 2, 2 };
telsoa014fcda012018-03-09 14:13:49 +00003213
James Conroy074f3712018-10-03 09:32:03 +01003214 return ResizeBilinearSqMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
3215}
3216
3217LayerTestResult<float, 4> ResizeBilinearSqMinNhwcTest(armnn::IWorkloadFactory& workloadFactory)
3218{
3219 // inputShape: BatchSize = 1, Height = 4, Width = 4, Channels = 1
3220 const armnn::TensorShape inputShape{ 1, 4, 4, 1 };
3221
3222 // outputShape: BatchSize = 1, Height = 2, Width = 2, Channels = 1
3223 const armnn::TensorShape outputShape{ 1, 2, 2, 1 };
3224
3225 return ResizeBilinearSqMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
3226}
3227
3228LayerTestResult<float, 4> ResizeBilinearMinTestImpl(armnn::IWorkloadFactory& workloadFactory,
3229 const armnn::TensorShape& inputTensorShape,
3230 const armnn::TensorShape& outputTensorShape,
3231 armnn::DataLayout dataLayout)
3232{
3233 const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
3234 const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00003235
3236 auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
James Conroy074f3712018-10-03 09:32:03 +01003237 1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
3238 13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
3239 144.0f, 233.0f, 377.0f, 610.0f, 987.0f
telsoa014fcda012018-03-09 14:13:49 +00003240 }));
3241
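    // Downscaling the 3x5 input to 2x3 gives scale factors of 1.5 (height) and 5/3 (width), so the
    // projected source coordinates generally fall between input texels and the expected values are
    // linear blends, e.g. output (1,0) projects to input row 1.5: 13.0f + 0.5f * (144.0f - 13.0f) = 78.5f.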
3242 LayerTestResult<float, 4> result(outputTensorInfo);
3243 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
James Conroy074f3712018-10-03 09:32:03 +01003244 1.0f, 2.6666f, 6.0f,
3245 78.5f, 179.3333f, 401.0f
telsoa014fcda012018-03-09 14:13:49 +00003246 }));
3247
3248 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3249 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3250
3251 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003252 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003253 armnn::WorkloadInfo info;
3254 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3255 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3256
3257 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3258
3259 inputHandle->Allocate();
3260 outputHandle->Allocate();
3261 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3262
surmeh013537c2c2018-05-18 16:31:43 +01003263 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00003264 workload->Execute();
3265
3266 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3267 return result;
3268}
3269
3270LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory)
3271{
James Conroy074f3712018-10-03 09:32:03 +01003272 // inputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 5
3273 const armnn::TensorShape inputShape{ 1, 1, 3, 5 };
telsoa014fcda012018-03-09 14:13:49 +00003274
James Conroy074f3712018-10-03 09:32:03 +01003275 // outputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 3
3276 const armnn::TensorShape outputShape{ 1, 1, 2, 3 };
telsoa014fcda012018-03-09 14:13:49 +00003277
James Conroy074f3712018-10-03 09:32:03 +01003278 return ResizeBilinearMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
3279}
3280
3281LayerTestResult<float, 4> ResizeBilinearMinNhwcTest(armnn::IWorkloadFactory& workloadFactory)
3282{
3283 // inputShape: BatchSize = 1, Height = 3, Width = 5, Channels = 1
3284 const armnn::TensorShape inputShape{ 1, 3, 5, 1 };
3285
3286 // outputShape: BatchSize = 1, Height = 2, Width = 3, Channels = 1
3287 const armnn::TensorShape outputShape{ 1, 2, 3, 1 };
3288
3289 return ResizeBilinearMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
3290}
3291
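// A minimal sketch (illustrative only) of the one-dimensional blend applied when a projected source
// coordinate falls between two input texels: the fractional part of the coordinate weights the two
// neighbours. For the 3x5 -> 2x3 case above, blending 13.0f with 144.0f at weight 0.5f yields 78.5f.
inline float BilinearLerpSketch(float a, float b, float weight)
{
    return a + weight * (b - a); // weight is the fractional part of the projected source coordinate.
}
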
3292LayerTestResult<float, 4> ResizeBilinearMagTestImpl(armnn::IWorkloadFactory& workloadFactory,
3293 const armnn::TensorShape& inputTensorShape,
3294 const armnn::TensorShape& outputTensorShape,
3295 armnn::DataLayout dataLayout)
3296{
3297 const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
3298 const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00003299
3300 auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
James Conroy074f3712018-10-03 09:32:03 +01003301 1.0f, 2.0f,
3302 13.0f, 21.0f,
3303 144.0f, 233.0f
telsoa014fcda012018-03-09 14:13:49 +00003304 }));
3305
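    // Upscaling the width from 2 to 5 gives a scale factor of 0.4, so output columns 0-4 project to
    // source columns 0.0, 0.4, 0.8, 1.2 and 1.6. For the last two, the right-hand interpolant is
    // clamped to the final input column, which is why each row ends with the edge value repeated.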
3306 LayerTestResult<float, 4> result(outputTensorInfo);
3307 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
James Conroy074f3712018-10-03 09:32:03 +01003308 1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
3309 13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
3310 144.0f, 179.6f, 215.2f, 233.0f, 233.0f
telsoa014fcda012018-03-09 14:13:49 +00003311 }));
3312
3313 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3314 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3315
3316 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003317 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003318 armnn::WorkloadInfo info;
3319 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3320 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3321
3322 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3323
3324 inputHandle->Allocate();
3325 outputHandle->Allocate();
3326 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3327
surmeh013537c2c2018-05-18 16:31:43 +01003328 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00003329 workload->Execute();
3330
3331 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3332 return result;
3333}
3334
3335LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory)
3336{
James Conroy074f3712018-10-03 09:32:03 +01003337 // inputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 2
3338 const armnn::TensorShape inputShape{ 1, 1, 3, 2 };
telsoa014fcda012018-03-09 14:13:49 +00003339
James Conroy074f3712018-10-03 09:32:03 +01003340 // outputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 5
3341 const armnn::TensorShape outputShape{ 1, 1, 3, 5 };
telsoa014fcda012018-03-09 14:13:49 +00003342
James Conroy074f3712018-10-03 09:32:03 +01003343 return ResizeBilinearMagTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
3344}
telsoa014fcda012018-03-09 14:13:49 +00003345
James Conroy074f3712018-10-03 09:32:03 +01003346LayerTestResult<float, 4> ResizeBilinearMagNhwcTest(armnn::IWorkloadFactory& workloadFactory)
3347{
3348 // inputShape: BatchSize = 1, Height = 3, Width = 2, Channels = 1
3349 const armnn::TensorShape inputShape{ 1, 3, 2, 1 };
telsoa014fcda012018-03-09 14:13:49 +00003350
James Conroy074f3712018-10-03 09:32:03 +01003351 // outputShape: BatchSize = 1, Height = 3, Width = 5, Channels = 1
3352 const armnn::TensorShape outputShape{ 1, 3, 5, 1 };
telsoa014fcda012018-03-09 14:13:49 +00003353
James Conroy074f3712018-10-03 09:32:03 +01003354 return ResizeBilinearMagTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00003355}
3356
3357LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workloadFactory)
3358{
3359 constexpr unsigned int width = 2;
3360 constexpr unsigned int height = 3;
3361
3362 const armnn::TensorInfo tensorInfo({ height, width },
3363 armnn::DataType::Float32);
3364 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
3365 -10.0f, -5.0f,
3366 0.0f, 5.0f,
3367 10.0f, 10.0f
3368 }));
3369
3370 LayerTestResult<float, 2> ret(tensorInfo);
3371
3372 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
3373
3374 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
3375
3376 armnn::FakeQuantizationQueueDescriptor data;
3377 armnn::WorkloadInfo info;
3378
3379 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
3380 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
3381 float min = -10.f;
3382 float max = 10.f;
3383
3384 data.m_Parameters.m_Min = min;
3385 data.m_Parameters.m_Max = max;
3386
3387 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
3388 armnn::FakeQuantizationQueueDescriptor refData = data;
3389 armnn::WorkloadInfo refInfo = info;
3390 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
3391
3392 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
3393
3394 inputHandle->Allocate();
3395 outputHandle->Allocate();
3396
3397 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
3398
surmeh013537c2c2018-05-18 16:31:43 +01003399 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00003400 workload->Execute();
3401
3402 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
3403
3404 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
3405 0.0f, 63.0f,
3406 128.0f, 191.0f,
3407 255.0f, 255.0f
3408 }));
3409 return ret;
3410}
3411
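// A rough sketch of the rescaling behind the FakeQuantization expected values above, assuming a
// linear mapping of [m_Min, m_Max] onto the quantized range [0, 255]; the exact rounding of
// interior values (e.g. -5.0f -> 63.0f) is left to the workload implementation.
inline float FakeQuantizationSketch(float x, float min, float max)
{
    const float scale = 255.0f / (max - min); // 12.75 quantization steps per unit for [-10, 10].
    return (x - min) * scale;                 // Note: no rounding is applied in this sketch.
}
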
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003412namespace
3413{
3414
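// Shared runner for the L2 normalization tests below. Normalization is applied across the channel
// axis, whose position in the 4d shape depends on the requested data layout (index 1 for NCHW,
// index 3 for NHWC).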
3415LayerTestResult<float, 4> L2NormalizationTestImpl(armnn::IWorkloadFactory& workloadFactory,
3416 const armnn::TensorShape& inputOutputTensorShape,
3417 const std::vector<float>& inputValues,
3418 const std::vector<float>& expectedOutputValues,
3419 armnn::DataLayout dataLayout)
3420{
3421 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
3422 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
3423
3424 auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputValues));
3425
3426 LayerTestResult<float, 4> result(outputTensorInfo);
3427 result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(expectedOutputValues));
3428
3429 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3430 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3431
3432 armnn::L2NormalizationQueueDescriptor descriptor;
3433 descriptor.m_Parameters.m_DataLayout = dataLayout;
3434 armnn::WorkloadInfo info;
3435
3436 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3437 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3438
3439 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
3440
3441 inputHandle->Allocate();
3442 outputHandle->Allocate();
3443
3444 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
3445
3446 workloadFactory.Finalize();
3447 workload->Execute();
3448
3449 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3450
3451 return result;
3452}
3453
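// Computes 1 / ||elements||_2, the reciprocal of the Euclidean (L2) norm. The tests below multiply
// each input value by this factor to build their expected outputs: out_c = in_c / sqrt(sum_k in_k^2).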
3454float CalcInvL2Norm(std::initializer_list<float> elements)
3455{
3456 const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
3457 [](float acc, float element) { return acc + element * element; });
3458 return 1.0f / sqrtf(reduction);
3459}
3460
3461} // anonymous namespace
3462
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003463template<typename T>
3464LayerTestResult<T, 2> Pad2dTestCommon(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003465{
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003466 const armnn::TensorShape inputShape{ 3, 3 };
3467 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003468
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003469 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
3470 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003471
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003472 std::vector<T> inputValues(
3473 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003474 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003475 // Height (3) x Width (3)
3476 4, 8, 6,
3477 7, 4, 4,
3478 3, 2, 4
3479 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003480
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003481 std::vector<T> expectedOutputValues(
3482 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003483 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003484 0, 0, 0, 0, 0, 0, 0,
3485 0, 0, 0, 0, 0, 0, 0,
3486 0, 0, 4, 8, 6, 0, 0,
3487 0, 0, 7, 4, 4, 0, 0,
3488 0, 0, 3, 2, 4, 0, 0,
3489 0, 0, 0, 0, 0, 0, 0,
3490 0, 0, 0, 0, 0, 0, 0
3491 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003492
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003493 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003494
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003495 LayerTestResult<T, 2> result(outputTensorInfo);
3496 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003497
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003498 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3499 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003500
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003501 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003502
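    // Each PadList entry is a (before, after) pair of zero-padding amounts for one dimension,
    // so the 3x3 input grows to (2 + 3 + 2) x (2 + 3 + 2) = 7x7.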
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003503 std::vector<std::pair<unsigned int, unsigned int>> PadList;
3504 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
3505 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003506
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003507 descriptor.m_Parameters.m_PadList = PadList;
3508 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003509
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003510 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3511 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003512
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003513 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003514
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003515 inputHandle->Allocate();
3516 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003517
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003518 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003519
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003520 workloadFactory.Finalize();
3521 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003522
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003523 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003524
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003525 return result;
3526}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003527
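// A small sketch (illustrative only, not used by the tests) of how a padded dimension size relates
// to its PadList entry; the helper name is hypothetical.
inline unsigned int PaddedSizeSketch(unsigned int inputSize,
                                     const std::pair<unsigned int, unsigned int>& padding)
{
    // outputSize = padBefore + inputSize + padAfter, e.g. 2 + 3 + 2 = 7 for the 2d test above.
    return padding.first + inputSize + padding.second;
}
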
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003528template <typename T>
3529LayerTestResult<T, 3> Pad3dTestCommon(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003530{
3531 const armnn::TensorShape inputShape{ 2, 2, 2 };
3532 const armnn::TensorShape outputShape{ 3, 5, 6 };
3533
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003534 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
3535 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003536
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003537 std::vector<T> inputValues(
3538 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003539 {
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003540 // Channel 0, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003541 0, 4,
3542 2, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003543
3544 // Channel 1, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003545 6, 1,
3546 5, 2
3547 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003548
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003549 std::vector<T> expectedOutputValues(
3550 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003551 {
3552
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003553 0, 0, 0, 0, 0, 0,
3554 0, 0, 0, 0, 0, 0,
3555 0, 0, 0, 4, 0, 0,
3556 0, 0, 2, 5, 0, 0,
3557 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003558
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003559 0, 0, 0, 0, 0, 0,
3560 0, 0, 0, 0, 0, 0,
3561 0, 0, 6, 1, 0, 0,
3562 0, 0, 5, 2, 0, 0,
3563 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003564
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003565 0, 0, 0, 0, 0, 0,
3566 0, 0, 0, 0, 0, 0,
3567 0, 0, 0, 0, 0, 0,
3568 0, 0, 0, 0, 0, 0,
3569 0, 0, 0, 0, 0, 0
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003570
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003571 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003572
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003573 auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003574
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003575 LayerTestResult<T, 3> result(outputTensorInfo);
3576 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003577
3578 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3579 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3580
3581 armnn::PadQueueDescriptor descriptor;
3582
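    // (before, after) zero-padding per dimension: 0+2+1 = 3 channels, 2+2+1 = 5 rows and
    // 2+2+2 = 6 columns, matching the { 3, 5, 6 } output shape above.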
3583 std::vector<std::pair<unsigned int, unsigned int>> PadList;
3584 PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
3585 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
3586 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
3587
3588 descriptor.m_Parameters.m_PadList = PadList;
3589 armnn::WorkloadInfo info;
3590
3591 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3592 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3593
3594 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
3595
3596 inputHandle->Allocate();
3597 outputHandle->Allocate();
3598
3599 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
3600
3601 workloadFactory.Finalize();
3602 workload->Execute();
3603
3604 CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
3605
3606 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003607}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003608
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003609template <typename T>
3610LayerTestResult<T, 4> Pad4dTestCommon(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003611{
3612 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
3613 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
3614
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003615 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
3616 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003617
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003618 std::vector<T> inputValues(
3619 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003620 {
3621 // Batch 0, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003622 0, 1,
3623 2, 3,
3624 4, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003625
3626 // Batch 0, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003627 6, 7,
3628 8, 9,
3629 10, 11,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003630
3631 // Batch 1, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003632 12, 13,
3633 14, 15,
3634 16, 17,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003635
3636 // Batch 1, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003637 18, 19,
3638 20, 21,
3639 22, 23
3640 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003641
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003642 std::vector<T> expectedOutputValues(
3643 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003644 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003645 0, 0, 0, 0,
3646 0, 0, 0, 0,
3647 0, 0, 0, 0,
3648 0, 0, 0, 0,
3649 0, 0, 0, 0,
3650 0, 0, 0, 0,
3651 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003652
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003653 0, 0, 0, 0,
3654 0, 0, 0, 0,
3655 0, 0, 0, 0,
3656 0, 0, 0, 0,
3657 0, 0, 0, 0,
3658 0, 0, 0, 0,
3659 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003660
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003661 0, 0, 0, 0,
3662 0, 0, 0, 0,
3663 0, 0, 0, 0,
3664 0, 0, 0, 0,
3665 0, 0, 0, 0,
3666 0, 0, 0, 0,
3667 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003668
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003669 0, 0, 0, 0,
3670 0, 0, 0, 0,
3671 0, 0, 0, 0,
3672 0, 0, 0, 0,
3673 0, 0, 0, 0,
3674 0, 0, 0, 0,
3675 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003676
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003677 0, 0, 0, 0,
3678 0, 0, 0, 0,
3679 0, 0, 0, 0,
3680 0, 0, 0, 0,
3681 0, 0, 0, 0,
3682 0, 0, 0, 0,
3683 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003684
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003685 0, 0, 0, 0,
3686 0, 0, 0, 0,
3687 0, 0, 0, 0,
3688 0, 0, 0, 0,
3689 0, 0, 0, 0,
3690 0, 0, 0, 0,
3691 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003692
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003693 0, 0, 0, 0,
3694 0, 0, 0, 0,
3695 0, 0, 0, 0,
3696 0, 0, 0, 0,
3697 0, 0, 0, 0,
3698 0, 0, 0, 0,
3699 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003700
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003701 0, 0, 0, 0,
3702 0, 0, 0, 0,
3703 0, 0, 0, 0,
3704 0, 0, 1, 0,
3705 0, 2, 3, 0,
3706 0, 4, 5, 0,
3707 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003708
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003709 0, 0, 0, 0,
3710 0, 0, 0, 0,
3711 0, 0, 0, 0,
3712 0, 6, 7, 0,
3713 0, 8, 9, 0,
3714 0, 10, 11, 0,
3715 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003716
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003717 0, 0, 0, 0,
3718 0, 0, 0, 0,
3719 0, 0, 0, 0,
3720 0, 0, 0, 0,
3721 0, 0, 0, 0,
3722 0, 0, 0, 0,
3723 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003724
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003725 0, 0, 0, 0,
3726 0, 0, 0, 0,
3727 0, 0, 0, 0,
3728 0, 0, 0, 0,
3729 0, 0, 0, 0,
3730 0, 0, 0, 0,
3731 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003732
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003733 0, 0, 0, 0,
3734 0, 0, 0, 0,
3735 0, 0, 0, 0,
3736 0, 0, 0, 0,
3737 0, 0, 0, 0,
3738 0, 0, 0, 0,
3739 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003740
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003741 0, 0, 0, 0,
3742 0, 0, 0, 0,
3743 0, 0, 0, 0,
3744 0, 12, 13, 0,
3745 0, 14, 15, 0,
3746 0, 16, 17, 0,
3747 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003748
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003749 0, 0, 0, 0,
3750 0, 0, 0, 0,
3751 0, 0, 0, 0,
3752 0, 18, 19, 0,
3753 0, 20, 21, 0,
3754 0, 22, 23, 0,
3755 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003756
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003757 0, 0, 0, 0,
3758 0, 0, 0, 0,
3759 0, 0, 0, 0,
3760 0, 0, 0, 0,
3761 0, 0, 0, 0,
3762 0, 0, 0, 0,
3763 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003764
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003765 0, 0, 0, 0,
3766 0, 0, 0, 0,
3767 0, 0, 0, 0,
3768 0, 0, 0, 0,
3769 0, 0, 0, 0,
3770 0, 0, 0, 0,
3771 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003772
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003773 0, 0, 0, 0,
3774 0, 0, 0, 0,
3775 0, 0, 0, 0,
3776 0, 0, 0, 0,
3777 0, 0, 0, 0,
3778 0, 0, 0, 0,
3779 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003780
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003781 0, 0, 0, 0,
3782 0, 0, 0, 0,
3783 0, 0, 0, 0,
3784 0, 0, 0, 0,
3785 0, 0, 0, 0,
3786 0, 0, 0, 0,
3787 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003788
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003789 0, 0, 0, 0,
3790 0, 0, 0, 0,
3791 0, 0, 0, 0,
3792 0, 0, 0, 0,
3793 0, 0, 0, 0,
3794 0, 0, 0, 0,
3795 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003796
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003797 0, 0, 0, 0,
3798 0, 0, 0, 0,
3799 0, 0, 0, 0,
3800 0, 0, 0, 0,
3801 0, 0, 0, 0,
3802 0, 0, 0, 0,
3803 0, 0, 0, 0
3804 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003805
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003806 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003807
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003808 LayerTestResult<T, 4> result(outputTensorInfo);
3809 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003810
3811 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3812 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3813
3814 armnn::PadQueueDescriptor descriptor;
3815
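    // (before, after) zero-padding per dimension: 1+2+1 = 4 batches, 2+2+1 = 5 channels,
    // 3+3+1 = 7 rows and 1+2+1 = 4 columns, matching the { 4, 5, 7, 4 } output shape above.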
3816 std::vector<std::pair<unsigned int, unsigned int>> PadList;
3817 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
3818 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
3819 PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
3820 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
3821
3822 descriptor.m_Parameters.m_PadList = PadList;
3823 armnn::WorkloadInfo info;
3824
3825 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3826 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3827
3828 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
3829
3830 inputHandle->Allocate();
3831 outputHandle->Allocate();
3832
3833 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
3834
3835 workloadFactory.Finalize();
3836
3837 workload->Execute();
3838
3839 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3840
3841 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003842}
3843
3844LayerTestResult<uint8_t, 2> PadUint82dTest(armnn::IWorkloadFactory& workloadFactory)
3845{
3846 return Pad2dTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
3847}
3848
3849LayerTestResult<uint8_t, 3> PadUint83dTest(armnn::IWorkloadFactory& workloadFactory)
3850{
3851 return Pad3dTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
3852}
3853
3854LayerTestResult<uint8_t, 4> PadUint84dTest(armnn::IWorkloadFactory& workloadFactory)
3855{
3856 return Pad4dTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
3857}
3858
3859LayerTestResult<float, 2> PadFloat322dTest(armnn::IWorkloadFactory& workloadFactory)
3860{
3861 return Pad2dTestCommon<float>(workloadFactory, 0.0f, 0);
3862}
3863
3864LayerTestResult<float, 3> PadFloat323dTest(armnn::IWorkloadFactory& workloadFactory)
3865{
3866 return Pad3dTestCommon<float>(workloadFactory, 0.0f, 0);
3867}
3868
3869LayerTestResult<float, 4> PadFloat324dTest(armnn::IWorkloadFactory& workloadFactory)
3870{
3871 return Pad4dTestCommon<float>(workloadFactory, 0.0f, 0);
3872}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003873
telsoa014fcda012018-03-09 14:13:49 +00003874LayerTestResult<float, 4> L2Normalization1dTest(armnn::IWorkloadFactory& workloadFactory)
3875{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003876 // Width: 1
3877 // Height: 1
3878 // Channels: 10
3879 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00003880
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003881 const armnn::TensorShape inputOutputShape{ 1, 10, 1, 1 };
3882 std::vector<float> inputValues
3883 {
3884 // Batch 0, Channel 0, Height (1) x Width (1)
3885 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00003886
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003887 // Batch 0, Channel 1, Height (1) x Width (1)
3888 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00003889
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003890 // Batch 0, Channel 2, Height (1) x Width (1)
3891 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00003892
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003893 // Batch 0, Channel 3, Height (1) x Width (1)
3894 4.0f,
3895
3896 // Batch 0, Channel 4, Height (1) x Width (1)
3897 5.0f,
3898
3899 // Batch 0, Channel 5, Height (1) x Width (1)
3900 6.0f,
3901
3902 // Batch 0, Channel 6, Height (1) x Width (1)
3903 7.0f,
3904
3905 // Batch 0, Channel 7, Height (1) x Width (1)
3906 8.0f,
3907
3908 // Batch 0, Channel 8, Height (1) x Width (1)
3909 9.0f,
3910
3911 // Batch 0, Channel 9, Height (1) x Width (1)
3912 10.0f
3913 };
telsoa014fcda012018-03-09 14:13:49 +00003914 const float approxInvL2Norm = 0.050964719f; // 1.0f / sqrtf(1*1 + 2*2 + ... + 10*10) = 1.0f / sqrtf(385.0f)
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003915 std::vector<float> expectedOutputValues
3916 {
3917 // Batch 0, Channel 0, Height (1) x Width (1)
telsoa014fcda012018-03-09 14:13:49 +00003918 1.0f * approxInvL2Norm,
3919 2.0f * approxInvL2Norm,
3920 3.0f * approxInvL2Norm,
3921 4.0f * approxInvL2Norm,
3922 5.0f * approxInvL2Norm,
3923 6.0f * approxInvL2Norm,
3924 7.0f * approxInvL2Norm,
3925 8.0f * approxInvL2Norm,
3926 9.0f * approxInvL2Norm,
3927 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003928 };
telsoa014fcda012018-03-09 14:13:49 +00003929
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003930 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
3931 inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
telsoa014fcda012018-03-09 14:13:49 +00003932}
3933
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003934LayerTestResult<float, 4> L2Normalization1dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00003935{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003936 // Width: 1
3937 // Height: 1
3938 // Channels: 10
3939 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00003940
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003941 const armnn::TensorShape inputOutputShape{ 1, 1, 1, 10 };
3942 std::vector<float> inputValues
3943 {
3944 // Batch 0, Height 0, Width (1) x Channel (10)
3945 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f
3946 };
3947 const float approxInvL2Norm = 0.050964719f; // 1.0f / sqrtf(385.0f), as in the NCHW variant above
3948 std::vector<float> expectedOutputValues
3949 {
3950 // Batch 0, Height 0, Width (1) x Channel (10)
3951 1.0f * approxInvL2Norm,
3952 2.0f * approxInvL2Norm,
3953 3.0f * approxInvL2Norm,
3954 4.0f * approxInvL2Norm,
3955 5.0f * approxInvL2Norm,
3956 6.0f * approxInvL2Norm,
3957 7.0f * approxInvL2Norm,
3958 8.0f * approxInvL2Norm,
3959 9.0f * approxInvL2Norm,
3960 10.0f * approxInvL2Norm
3961 };
3962
3963 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
3964 inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00003965}
3966
3967LayerTestResult<float, 4> L2Normalization2dTest(armnn::IWorkloadFactory& workloadFactory)
3968{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003969 // Width: 5
3970 // Height: 1
3971 // Channels: 2
3972 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00003973
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003974 const armnn::TensorShape inputOutputShape{ 1, 2, 1, 5 };
3975 std::vector<float> inputValues
3976 {
3977 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00003978 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00003979
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003980 // Batch 0, Channel 1, Height (1) x Width (5)
3981 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
3982 };
3983 std::vector<float> expectedOutputValues
3984 {
3985 // Batch 0, Channel 0, Height (1) x Width (5)
3986 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
3987 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
3988 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
3989 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00003990 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
3991
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003992 // Batch 0, Channel 1, Height (1) x Width (5)
3993 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
3994 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
3995 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
3996 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00003997 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003998 };
telsoa014fcda012018-03-09 14:13:49 +00003999
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004000 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4001 inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
4002}
telsoa014fcda012018-03-09 14:13:49 +00004003
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004004LayerTestResult<float, 4> L2Normalization2dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
4005{
4006 // Width: 5
4007 // Height: 1
4008 // Channels: 2
4009 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00004010
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004011 const armnn::TensorShape inputOutputShape{ 1, 1, 5, 2 };
4012 std::vector<float> inputValues
4013 {
4014 // Batch 0, Height 0, Width (5) x Channel (2)
4015 1.0f, 2.0f,
4016 3.0f, 4.0f,
4017 5.0f, 6.0f,
4018 7.0f, 8.0f,
4019 9.0f, 10.0f
4020 };
4021 std::vector<float> expectedOutputValues
4022 {
4023 // Batch 0, Height 0, Width (5) x Channel (2)
4024 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
4025 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
4026 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
4027 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
4028 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
4029 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
4030 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
4031 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
4032 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
4033 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
4034 };
telsoa014fcda012018-03-09 14:13:49 +00004035
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004036 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4037 inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00004038}
4039
4040LayerTestResult<float, 4> L2Normalization3dTest(armnn::IWorkloadFactory& workloadFactory)
4041{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004042 // Width: 3
4043 // Height: 4
4044 // Channels: 2
4045 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00004046
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004047 const armnn::TensorShape inputOutputShape{ 1, 2, 4, 3 };
4048 std::vector<float> inputValues
4049 {
4050 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004051 119.0f, 21.0f, 150.0f,
4052 149.0f, 32.0f, 179.0f,
4053 15.0f, 227.0f, 141.0f,
4054 147.0f, 199.0f, 220.0f,
4055
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004056 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004057 110.0f, 140.0f, 73.0f,
4058 211.0f, 212.0f, 89.0f,
4059 24.0f, 138.0f, 188.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004060 162.0f, 12.0f, 161.0f
4061 };
4062 std::vector<float> expectedOutputValues
4063 {
4064 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004065 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4066 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4067 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4068 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4069 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4070 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4071 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4072 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4073 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4074 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4075 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
4076 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
4077
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004078 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004079 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4080 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4081 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4082 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4083 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4084 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4085 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4086 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4087 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4088 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4089 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004090 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
4091 };
telsoa014fcda012018-03-09 14:13:49 +00004092
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004093 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4094 inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
4095}
telsoa014fcda012018-03-09 14:13:49 +00004096
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004097LayerTestResult<float, 4> L2Normalization3dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
4098{
4099 // Width: 3
4100 // Height: 4
4101 // Channels: 2
4102 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00004103
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004104 const armnn::TensorShape inputOutputShape{ 1, 4, 3, 2 };
4105 std::vector<float> inputValues
4106 {
4107 // Batch 0, Height 0, Width (3) x Channel (2)
4108 119.0f, 110.0f,
4109 21.0f, 140.0f,
4110 150.0f, 73.0f,
telsoa014fcda012018-03-09 14:13:49 +00004111
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004112 // Batch 0, Height 1, Width (3) x Channel (2)
4113 149.0f, 211.0f,
4114 32.0f, 212.0f,
4115 179.0f, 89.0f,
telsoa014fcda012018-03-09 14:13:49 +00004116
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004117 // Batch 0, Height 2, Width (3) x Channel (2)
4118 15.0f, 24.0f,
4119 227.0f, 138.0f,
4120 141.0f, 188.0f,
telsoa014fcda012018-03-09 14:13:49 +00004121
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004122 // Batch 0, Height 3, Width (3) x Channel (2)
4123 147.0f, 162.0f,
4124 199.0f, 12.0f,
4125 220.0f, 161.0f
4126 };
4127 std::vector<float> expectedOutputValues
4128 {
4129 // Batch 0, Height 0, Width (3) x Channel (2)
4130 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4131 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4132 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4133 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4134 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4135 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4136
4137 // Batch 0, Height 1, Width (3) x Channel (2)
4138 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4139 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4140 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4141 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4142 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4143 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4144
4145 // Batch 0, Height 2, Width (3) x Channel (2)
4146 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4147 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4148 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4149 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4150 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4151 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4152
4153 // Batch 0, Height 3, Width (3) x Channel (2)
4154 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4155 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4156 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
4157 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
4158 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
4159 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
4160 };
4161
4162 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4163 inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00004164}
4165
4166LayerTestResult<float, 4> L2Normalization4dTest(armnn::IWorkloadFactory& workloadFactory)
4167{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004168 // Width: 3
4169 // Height: 4
4170 // Channels: 3
4171 // BatchSize: 2
telsoa014fcda012018-03-09 14:13:49 +00004172
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004173 const armnn::TensorShape inputOutputShape{ 2, 3, 4, 3 };
4174 std::vector<float> inputValues
4175 {
4176 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004177 235.0f, 46.0f, 178.0f,
4178 100.0f, 123.0f, 19.0f,
4179 172.0f, 74.0f, 250.0f,
4180 6.0f, 195.0f, 80.0f,
4181
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004182 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004183 113.0f, 95.0f, 202.0f,
4184 77.0f, 114.0f, 71.0f,
4185 122.0f, 246.0f, 166.0f,
4186 82.0f, 28.0f, 37.0f,
4187
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004188 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004189 56.0f, 170.0f, 162.0f,
4190 194.0f, 89.0f, 254.0f,
4191 12.0f, 209.0f, 200.0f,
4192 1.0f, 64.0f, 54.0f,
4193
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004194 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004195 67.0f, 90.0f, 49.0f,
4196 7.0f, 163.0f, 18.0f,
4197 25.0f, 117.0f, 103.0f,
4198 247.0f, 59.0f, 189.0f,
4199
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004200 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004201 239.0f, 104.0f, 199.0f,
4202 17.0f, 124.0f, 153.0f,
4203 222.0f, 217.0f, 75.0f,
4204 32.0f, 126.0f, 21.0f,
4205
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004206 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004207 97.0f, 145.0f, 215.0f,
4208 115.0f, 116.0f, 238.0f,
4209 226.0f, 16.0f, 132.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004210 92.0f, 125.0f, 88.0f
4211 };
4212 std::vector<float> expectedOutputValues
4213 {
4214 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004215 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4216 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4217 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
4218 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4219 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4220 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4221 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4222 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4223 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4224 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4225 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4226 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4227
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004228 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004229 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4230 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4231 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
4232 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4233 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4234 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4235 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4236 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4237 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4238 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4239 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4240 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4241
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004242 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004243 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4244 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4245 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
4246 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4247 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4248 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4249 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4250 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4251 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4252 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4253 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4254 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4255
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004256 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004257 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4258 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4259 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4260 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4261 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4262 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4263 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4264 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4265 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4266 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4267 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4268 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
4269
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004270 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004271 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4272 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4273 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4274 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4275 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4276 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4277 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4278 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4279 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4280 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4281 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4282 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
4283
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004284 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004285 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4286 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4287 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4288 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4289 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4290 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4291 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4292 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4293 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4294 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4295 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004296 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
4297 };
telsoa014fcda012018-03-09 14:13:49 +00004298
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004299 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4300 inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
4301}
telsoa014fcda012018-03-09 14:13:49 +00004302
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004303LayerTestResult<float, 4> L2Normalization4dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
4304{
4305 // Width: 3
4306 // Height: 4
4307 // Channels: 3
4308 // BatchSize: 2
telsoa014fcda012018-03-09 14:13:49 +00004309
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004310 const armnn::TensorShape inputOutputShape{ 2, 4, 3, 3 };
4311 std::vector<float> inputValues
4312 {
4313 // Batch 0, Height 0, Width (3) x Channel (3)
4314 235.0f, 113.0f, 56.0f,
4315 46.0f, 95.0f, 170.0f,
4316 178.0f, 202.0f, 162.0f,
telsoa014fcda012018-03-09 14:13:49 +00004317
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004318 // Batch 0, Height 1, Width (3) x Channel (3)
4319 100.0f, 77.0f, 194.0f,
4320 123.0f, 114.0f, 89.0f,
4321 19.0f, 71.0f, 254.0f,
telsoa014fcda012018-03-09 14:13:49 +00004322
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004323 // Batch 0, Height 2, Width (3) x Channel (3)
4324 172.0f, 122.0f, 12.0f,
4325 74.0f, 246.0f, 209.0f,
4326 250.0f, 166.0f, 200.0f,
telsoa014fcda012018-03-09 14:13:49 +00004327
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004328 // Batch 0, Height 3, Width (3) x Channel (3)
4329 6.0f, 82.0f, 1.0f,
4330 195.0f, 28.0f, 64.0f,
4331 80.0f, 37.0f, 54.0f,
4332
4333 // Batch 1, Height 0, Width (3) x Channel (3)
4334 67.0f, 239.0f, 97.0f,
4335 90.0f, 104.0f, 145.0f,
4336 49.0f, 199.0f, 215.0f,
4337
4338 // Batch 1, Height 1, Width (3) x Channel (3)
4339 7.0f, 17.0f, 115.0f,
4340 163.0f, 124.0f, 116.0f,
4341 18.0f, 153.0f, 238.0f,
4342
4343 // Batch 1, Height 2, Width (3) x Channel (3)
4344 25.0f, 222.0f, 226.0f,
4345 117.0f, 217.0f, 16.0f,
4346 103.0f, 75.0f, 132.0f,
4347
4348 // Batch 1, Height 3, Width (3) x Channel (3)
4349 247.0f, 32.0f, 92.0f,
4350 59.0f, 126.0f, 125.0f,
4351 189.0f, 21.0f, 88.0f
4352 };
4353 std::vector<float> expectedOutputValues
4354 {
4355 // Batch 0, Height 0, Width (3) x Channel (3)
4356 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4357 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4358 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4359 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4360 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4361 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4362 178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
4363 202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
4364 162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
4365
4366 // Batch 0, Height 1, Width (3) x Channel (3)
4367 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4368 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4369 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4370 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4371 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4372 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4373 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4374 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4375 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4376
4377 // Batch 0, Height 2, Width (3) x Channel (3)
4378 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4379 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4380 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4381 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4382 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4383 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4384 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4385 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4386 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4387
4388 // Batch 0, Height 3, Width (3) x Channel (3)
4389 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4390 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4391 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4392 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4393 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4394 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4395 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4396 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4397 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4398
4399 // Batch 1, Height 0, Width (3) x Channel (3)
4400 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4401 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4402 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4403 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4404 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4405 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4406 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4407 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4408 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4409
4410 // Batch 1, Height 1, Width (3) x Channel (3)
4411 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4412 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4413 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4414 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4415 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4416 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4417 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4418 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4419 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4420
4421 // Batch 1, Height 2, Width (3) x Channel (3)
4422 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4423 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4424 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4425 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4426 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4427 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4428 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4429 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4430 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4431
4432 // Batch 1, Height 3, Width (3) x Channel (3)
4433 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4434 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4435 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4436 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4437 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4438 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4439 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
4440 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
4441 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
4442 };
4443
4444 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4445 inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00004446}
4447
4448template <typename T>
4449LayerTestResult<T, 4> ConstantTestImpl(armnn::IWorkloadFactory& workloadFactory,
4450 float qScale,
4451 int32_t qOffset)
4452{
4453 constexpr unsigned int inputWidth = 3;
4454 constexpr unsigned int inputHeight = 4;
4455 constexpr unsigned int inputChannels = 3;
4456 constexpr unsigned int inputBatchSize = 2;
4457
4458 constexpr unsigned int outputWidth = inputWidth;
4459 constexpr unsigned int outputHeight = inputHeight;
4460 constexpr unsigned int outputChannels = inputChannels;
4461 constexpr unsigned int outputBatchSize = inputBatchSize;
4462
4463 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
4464 armnn::GetDataType<T>());
4465
4466 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
4467 armnn::GetDataType<T>());
4468
4469 // Set quantization parameters if the requested type is a quantized type.
4470 if(armnn::IsQuantizedType<T>())
4471 {
4472 inputTensorInfo.SetQuantizationScale(qScale);
4473 inputTensorInfo.SetQuantizationOffset(qOffset);
4474 outputTensorInfo.SetQuantizationScale(qScale);
4475 outputTensorInfo.SetQuantizationOffset(qOffset);
4476 }
4477
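    // QuantizedVector is assumed to apply the usual affine quantisation per element,
    // roughly q = round(v / qScale) + qOffset (clamped to the type's range) for integer
    // types, and to pass values through unchanged for float. With the qScale = 1.0f,
    // qOffset = 0 used by ConstantTestUint8 below, 235.0f is simply stored as 235.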
4478 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
4479 QuantizedVector<T>(qScale, qOffset, {
4480 // Batch 0, Channel 0
4481 235.0f, 46.0f, 178.0f,
4482 100.0f, 123.0f, 19.0f,
4483 172.0f, 74.0f, 250.0f,
4484 6.0f, 195.0f, 80.0f,
4485
4486 // Batch 0, Channel 1
4487 113.0f, 95.0f, 202.0f,
4488 77.0f, 114.0f, 71.0f,
4489 122.0f, 246.0f, 166.0f,
4490 82.0f, 28.0f, 37.0f,
4491
4492 // Batch 0, Channel 2
4493 56.0f, 170.0f, 162.0f,
4494 194.0f, 89.0f, 254.0f,
4495 12.0f, 209.0f, 200.0f,
4496 1.0f, 64.0f, 54.0f,
4497
4498 // Batch 1, Channel 0
4499 67.0f, 90.0f, 49.0f,
4500 7.0f, 163.0f, 18.0f,
4501 25.0f, 117.0f, 103.0f,
4502 247.0f, 59.0f, 189.0f,
4503
4504 // Batch 1, Channel 1
4505 239.0f, 104.0f, 199.0f,
4506 17.0f, 124.0f, 153.0f,
4507 222.0f, 217.0f, 75.0f,
4508 32.0f, 126.0f, 21.0f,
4509
4510 // Batch 1, Channel 2
4511 97.0f, 145.0f, 215.0f,
4512 115.0f, 116.0f, 238.0f,
4513 226.0f, 16.0f, 132.0f,
4514 92.0f, 125.0f, 88.0f,
4515 })));
4516
4517 LayerTestResult<T, 4> result(outputTensorInfo);
4518 result.outputExpected = input;
4519
4520 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4521
4522 armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
4523 AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
4524
4525 armnn::ConstantQueueDescriptor descriptor;
4526 descriptor.m_LayerOutput = &constantTensor;
4527
4528 armnn::WorkloadInfo info;
4529 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4530
4531 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
4532
4533 outputHandle->Allocate();
4534
surmeh013537c2c2018-05-18 16:31:43 +01004535 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00004536 workload->Execute();
4537
4538 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4539 return result;
4540}
4541
4542LayerTestResult<float, 4> ConstantTest(armnn::IWorkloadFactory& workloadFactory)
4543{
4544 return ConstantTestImpl<float>(workloadFactory, 0.0f, 0);
4545}
4546
4547LayerTestResult<uint8_t, 4> ConstantTestUint8(armnn::IWorkloadFactory& workloadFactory)
4548{
4549 return ConstantTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
4550}
4551
4552LayerTestResult<uint8_t, 3> MergerUint8Test(armnn::IWorkloadFactory& workloadFactory)
4553{
surmeh013537c2c2018-05-18 16:31:43 +01004554 unsigned int outputWidth = 3;
telsoa014fcda012018-03-09 14:13:49 +00004555 unsigned int outputHeight = 6;
4556 unsigned int outputChannels = 3;
4557
surmeh013537c2c2018-05-18 16:31:43 +01004558 unsigned int inputWidth1 = 3;
4559 unsigned int inputHeight1 = 6;
4560 unsigned int inputChannels1 = 2;
telsoa014fcda012018-03-09 14:13:49 +00004561
surmeh013537c2c2018-05-18 16:31:43 +01004562 unsigned int inputWidth2 = 3;
4563 unsigned int inputHeight2 = 6;
4564 unsigned int inputChannels2 = 1;
telsoa014fcda012018-03-09 14:13:49 +00004565
telsoa01c577f2c2018-08-31 09:22:23 +01004566 // Defines the tensor descriptors.
telsoa014fcda012018-03-09 14:13:49 +00004567 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
4568 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
4569 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
telsoa014fcda012018-03-09 14:13:49 +00004570
telsoa01c577f2c2018-08-31 09:22:23 +01004571 // Arbitrary scale and offsets. They don't really matter as the merger operator doesn't dequantize/quantize them.
telsoa014fcda012018-03-09 14:13:49 +00004572 const float scale = 0.13497836f;
4573 const int32_t offset = -7;
4574
4575 outputTensorInfo.SetQuantizationScale(scale);
4576 outputTensorInfo.SetQuantizationOffset(offset);
4577 inputTensorInfo1.SetQuantizationScale(scale);
4578 inputTensorInfo1.SetQuantizationOffset(offset);
4579 inputTensorInfo2.SetQuantizationScale(scale);
4580 inputTensorInfo2.SetQuantizationOffset(offset);
telsoa014fcda012018-03-09 14:13:49 +00004581
4582 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
4583
4584 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
surmeh013537c2c2018-05-18 16:31:43 +01004585 {
4586 1, 2, 3,
4587 4, 5, 6,
4588 7, 8, 9,
4589 10, 11, 12,
4590 13, 14, 15,
4591 16, 17, 18,
telsoa014fcda012018-03-09 14:13:49 +00004592
surmeh013537c2c2018-05-18 16:31:43 +01004593 19, 20, 21,
4594 22, 23, 24,
4595 25, 26, 27,
4596 28, 29, 30,
4597 31, 32, 33,
4598 34, 35, 36,
telsoa014fcda012018-03-09 14:13:49 +00004599
surmeh013537c2c2018-05-18 16:31:43 +01004600 37, 38, 39,
4601 40, 41, 42,
4602 43, 44, 45,
4603 46, 47, 48,
4604 49, 50, 51,
4605 52, 53, 54,
4606 })
telsoa014fcda012018-03-09 14:13:49 +00004607 );
4608
telsoa014fcda012018-03-09 14:13:49 +00004609 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
4610 {
surmeh013537c2c2018-05-18 16:31:43 +01004611 1, 2, 3,
4612 4, 5, 6,
4613 7, 8, 9,
4614 10, 11, 12,
4615 13, 14, 15,
4616 16, 17, 18,
telsoa014fcda012018-03-09 14:13:49 +00004617
surmeh013537c2c2018-05-18 16:31:43 +01004618 19, 20, 21,
4619 22, 23, 24,
4620 25, 26, 27,
4621 28, 29, 30,
4622 31, 32, 33,
4623 34, 35, 36,
telsoa014fcda012018-03-09 14:13:49 +00004624 })
4625 );
4626
4627 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
4628 {
surmeh013537c2c2018-05-18 16:31:43 +01004629 37, 38, 39,
4630 40, 41, 42,
telsoa014fcda012018-03-09 14:13:49 +00004631 43, 44, 45,
surmeh013537c2c2018-05-18 16:31:43 +01004632 46, 47, 48,
4633 49, 50, 51,
4634 52, 53, 54,
telsoa014fcda012018-03-09 14:13:49 +00004635 })
4636 );
4637
telsoa01c577f2c2018-08-31 09:22:23 +01004638 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; // Extent of the window is defined by the size of input[0].
telsoa014fcda012018-03-09 14:13:49 +00004639 armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);
4640
telsoa01c577f2c2018-08-31 09:22:23 +01004641 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; // Extent of the window is defined by the size of input[1].
telsoa014fcda012018-03-09 14:13:49 +00004642 armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
4643
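    // Worked mapping of the two views: the output shape is { 3, 6, 3 } (C x H x W).
    // input[0], of shape { 2, 6, 3 }, is placed at channel origin 0 and fills channels
    // 0-1 (values 1-36); input[1], of shape { 1, 6, 3 }, is placed at channel origin 2
    // and fills channel 2 (values 37-54), giving the concatenated output above.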
telsoa014fcda012018-03-09 14:13:49 +00004644
4645 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4646
4647 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
4648
4649 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
4650 subTensorsSupported ?
4651 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
4652 workloadFactory.CreateTensorHandle(inputTensorInfo1);
4653
4654 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
4655 subTensorsSupported ?
4656 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
4657 workloadFactory.CreateTensorHandle(inputTensorInfo2);
4658
telsoa014fcda012018-03-09 14:13:49 +00004659
4660 armnn::MergerQueueDescriptor data;
4661 armnn::WorkloadInfo info;
4662 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
4663 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
telsoa014fcda012018-03-09 14:13:49 +00004664 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
4665
4666 data.m_ViewOrigins.push_back(window1);
4667 data.m_ViewOrigins.push_back(window2);
telsoa014fcda012018-03-09 14:13:49 +00004668
4669 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);
4670
4671 inputHandle1->Allocate();
4672 inputHandle2->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00004673 outputHandle->Allocate();
4674
4675 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
4676 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00004677
surmeh013537c2c2018-05-18 16:31:43 +01004678 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00004679 workload->Execute();
4680
4681 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
4682
4683 return ret;
4684}
4685
4686LayerTestResult<uint8_t, 4> AdditionUint8Test(armnn::IWorkloadFactory& workloadFactory)
4687{
4688 unsigned int batchSize = 1;
4689 unsigned int channels = 2;
4690 unsigned int height = 2;
4691 unsigned int width = 3;
4692
4693 const float scale = 7.0f;
4694 const int32_t offset = 3;
4695
4696 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
4697 armnn::TensorInfo outputTensorInfo;
4698
4699 const unsigned int shape[] = { batchSize, channels, height, width };
4700 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
4701 inputTensorInfo1.SetQuantizationScale(scale);
4702 inputTensorInfo1.SetQuantizationOffset(offset);
4703
4704 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
4705 inputTensorInfo2.SetQuantizationScale(scale);
4706 inputTensorInfo2.SetQuantizationOffset(offset);
4707
4708 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
4709 outputTensorInfo.SetQuantizationScale(scale);
4710 outputTensorInfo.SetQuantizationOffset(offset);
4711
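    // The inline comments below assume the usual affine (de)quantisation:
    // real = scale * (q - offset), so e.g. the first element of input1, q = 63,
    // dequantizes to 7.0f * (63 - 3) = 420. The expected output requantizes each sum
    // with q = real / scale + offset, clamped to [0, 255], hence the saturated 255s.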
telsoa01c577f2c2018-08-31 09:22:23 +01004712 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00004713 auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
4714 {
4715 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
4716 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
4717 }));
4718
telsoa01c577f2c2018-08-31 09:22:23 +01004719 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00004720 auto input2 = MakeTensor<uint8_t, 4>(inputTensorInfo2, std::vector<uint8_t>(
4721 {
4722 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
4723 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
4724 }));
4725
telsoa01c577f2c2018-08-31 09:22:23 +01004726 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00004727 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
4728 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>(
4729 {
4730 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
4731 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
4732 }));
4733
4734 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
4735 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
4736 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4737
4738 armnn::AdditionQueueDescriptor data;
4739 armnn::WorkloadInfo info;
4740 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
4741 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
4742 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
4743
4744 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
4745
4746 inputHandle1->Allocate();
4747 inputHandle2->Allocate();
4748 outputHandle->Allocate();
4749
4750 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
4751 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
4752
surmeh013537c2c2018-05-18 16:31:43 +01004753 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00004754 workload->Execute();
4755
4756 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4757
4758 return result;
4759}
4760
surmeh01bceff2f2018-03-29 16:29:27 +01004761namespace
telsoa014fcda012018-03-09 14:13:49 +00004762{
surmeh01bceff2f2018-03-29 16:29:27 +01004763LayerTestResult<uint8_t, 4> MultiplicationUint8TestHelper(armnn::IWorkloadFactory& workloadFactory,
4764 const unsigned int shape0[4],
4765 const std::vector<uint8_t> & values0,
4766 float scale0,
4767 int32_t offset0,
4768 const unsigned int shape1[4],
4769 const std::vector<uint8_t> & values1,
4770 float scale1,
4771 int32_t offset1,
4772 const unsigned int outShape[4],
4773 const std::vector<uint8_t> & outValues,
4774 float outScale,
4775 int32_t outOffset)
4776{
4777 armnn::TensorInfo inputTensorInfo0(4, shape0, armnn::DataType::QuantisedAsymm8);
4778 armnn::TensorInfo inputTensorInfo1(4, shape1, armnn::DataType::QuantisedAsymm8);
4779 armnn::TensorInfo outputTensorInfo(4, outShape, armnn::DataType::QuantisedAsymm8);
telsoa014fcda012018-03-09 14:13:49 +00004780
surmeh01bceff2f2018-03-29 16:29:27 +01004781 inputTensorInfo0.SetQuantizationScale(scale0);
4782 inputTensorInfo0.SetQuantizationOffset(offset0);
telsoa014fcda012018-03-09 14:13:49 +00004783
surmeh01bceff2f2018-03-29 16:29:27 +01004784 inputTensorInfo1.SetQuantizationScale(scale1);
4785 inputTensorInfo1.SetQuantizationOffset(offset1);
telsoa014fcda012018-03-09 14:13:49 +00004786
surmeh01bceff2f2018-03-29 16:29:27 +01004787 outputTensorInfo.SetQuantizationScale(outScale);
4788 outputTensorInfo.SetQuantizationOffset(outOffset);
telsoa014fcda012018-03-09 14:13:49 +00004789
surmeh01bceff2f2018-03-29 16:29:27 +01004790 auto input0 = MakeTensor<uint8_t, 4>(inputTensorInfo0, values0);
4791 auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00004792
telsoa014fcda012018-03-09 14:13:49 +00004793 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
surmeh01bceff2f2018-03-29 16:29:27 +01004794 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00004795
surmeh01bceff2f2018-03-29 16:29:27 +01004796 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
telsoa014fcda012018-03-09 14:13:49 +00004797 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
telsoa014fcda012018-03-09 14:13:49 +00004798 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4799
4800 armnn::MultiplicationQueueDescriptor data;
4801 armnn::WorkloadInfo info;
surmeh01bceff2f2018-03-29 16:29:27 +01004802 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
4803 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
telsoa014fcda012018-03-09 14:13:49 +00004804 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
4805
4806 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
4807
surmeh01bceff2f2018-03-29 16:29:27 +01004808 inputHandle0->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00004809 inputHandle1->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00004810 outputHandle->Allocate();
4811
surmeh01bceff2f2018-03-29 16:29:27 +01004812 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00004813 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00004814
surmeh013537c2c2018-05-18 16:31:43 +01004815 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00004816 workload->Execute();
4817
4818 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4819
4820 return result;
4821}
surmeh01bceff2f2018-03-29 16:29:27 +01004822} // anonymous namespace
4823
4824LayerTestResult<uint8_t, 4> MultiplicationUint8Test(armnn::IWorkloadFactory& workloadFactory)
4825{
4826 unsigned int batchSize = 1;
4827 unsigned int channels = 2;
4828 unsigned int height = 2;
4829 unsigned int width = 3;
4830 const unsigned int shape[] = { batchSize, channels, height, width };
4831
telsoa01c577f2c2018-08-31 09:22:23 +01004832 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01004833 std::vector<uint8_t> input0({
4834 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
4835 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
4836 });
4837
telsoa01c577f2c2018-08-31 09:22:23 +01004838 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01004839 std::vector<uint8_t> input1({
4840 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
4841 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
4842 });
4843
telsoa01c577f2c2018-08-31 09:22:23 +01004844 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01004845 std::vector<uint8_t> output(
4846 {
4847 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
4848 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
4849 });
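    // Worked requantisation for the first element: the dequantized inputs are
    // 4.0f * (62 - 1) = 244 and 3.0f * (126 - (-2)) = 384, giving the product 93696,
    // which is stored as 93696 / 1366.255f + (-5) = 64 (rounded). Products larger than
    // (255 + 5) * 1366.255f saturate at 255, matching the "(clamped)" entries above.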
4850
4851 return MultiplicationUint8TestHelper(workloadFactory,
4852 shape,
4853 input0,
4854 4.0f,
4855 1,
4856 shape,
4857 input1,
4858 3.0f,
4859 -2,
4860 shape,
4861 output,
telsoa01c577f2c2018-08-31 09:22:23 +01004862 1366.255f, // Scale/offset chosen to have output values out of range.
surmeh01bceff2f2018-03-29 16:29:27 +01004863 -5);
4864}
4865
4866LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
4867{
4868 const unsigned int shape0[] = { 1, 2, 2, 3 };
4869 const unsigned int shape1[] = { 1, 1, 1, 1 };
4870
4871 std::vector<uint8_t> input0({
4872 1, 2, 3, 4, 5, 6,
4873 7, 8, 9, 10, 11, 12
4874 });
4875
4876 std::vector<uint8_t> input1({2});
4877
4878 std::vector<uint8_t> output({
4879 2, 4, 6, 8, 10, 12,
4880 14, 16, 18, 20, 22, 24
4881 });
4882
4883 return MultiplicationUint8TestHelper(workloadFactory,
4884 shape0,
4885 input0,
4886 1.0f,
4887 0,
4888 shape1,
4889 input1,
4890 1.0f,
4891 0,
4892 shape0,
4893 output,
4894 1.0f,
4895 0);
4896}
4897
4898LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
4899{
4900 const unsigned int shape0[] = { 1, 2, 2, 3 };
4901 const unsigned int shape1[] = { 1, 1, 1, 3 };
4902
4903 std::vector<uint8_t> input0({
4904 1, 2, 3, 4, 5, 6,
4905 7, 8, 9, 10, 11, 12
4906 });
4907
4908 std::vector<uint8_t> input1({1, 2, 3});
4909
4910 std::vector<uint8_t> output({
4911 1, 4, 9, 4, 10, 18,
4912 7, 16, 27, 10, 22, 36
4913 });
4914
4915 return MultiplicationUint8TestHelper(workloadFactory,
4916 shape0,
4917 input0,
4918 1.0f,
4919 0,
4920 shape1,
4921 input1,
4922 1.0f,
4923 0,
4924 shape0,
4925 output,
4926 1.0f,
4927 0);
4928}
telsoa014fcda012018-03-09 14:13:49 +00004929
David Beckf195f032018-09-06 16:46:34 +01004930namespace
4931{
4932template <typename T>
4933LayerTestResult<T, 4> SubtractionTestHelper(armnn::IWorkloadFactory& workloadFactory,
4934 const unsigned int shape0[4],
4935 const std::vector<T>& values0,
4936 float scale0,
4937 int32_t offset0,
4938 const unsigned int shape1[4],
4939 const std::vector<T> & values1,
4940 float scale1,
4941 int32_t offset1,
4942 const unsigned int outShape[4],
4943 const std::vector<T> & outValues,
4944 float outScale,
4945 int32_t outOffset)
4946{
4947 auto dataType = (std::is_same<T, uint8_t>::value ?
4948 armnn::DataType::QuantisedAsymm8 :
4949 armnn::DataType::Float32);
4950
4951 armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
4952 armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
4953 armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
4954
4955 inputTensorInfo0.SetQuantizationScale(scale0);
4956 inputTensorInfo0.SetQuantizationOffset(offset0);
4957
4958 inputTensorInfo1.SetQuantizationScale(scale1);
4959 inputTensorInfo1.SetQuantizationOffset(offset1);
4960
4961 outputTensorInfo.SetQuantizationScale(outScale);
4962 outputTensorInfo.SetQuantizationOffset(outOffset);
4963
4964 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
4965 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
4966
4967 LayerTestResult<T, 4> result(outputTensorInfo);
4968 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
4969
4970 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
4971 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
4972 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4973
4974 armnn::SubtractionQueueDescriptor data;
4975 armnn::WorkloadInfo info;
4976 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
4977 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
4978 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
4979
4980 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);
4981
4982 inputHandle0->Allocate();
4983 inputHandle1->Allocate();
4984 outputHandle->Allocate();
4985
4986 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
4987 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
4988
4989 workloadFactory.Finalize();
4990 workload->Execute();
4991
4992 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4993
4994 return result;
4995}
4996} // anonymous namespace
4997
4998LayerTestResult<uint8_t, 4> SubtractionUint8Test(armnn::IWorkloadFactory& workloadFactory)
4999{
5000 const unsigned int shape0[] = { 1, 1, 2, 2 };
5001 const unsigned int shape1[] = { 1, 1, 2, 2 };
5002
5003 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
5004 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
5005 std::vector<uint8_t> output({ 3, 3, 5, 5 });
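    // Worked example: input0 dequantizes as 0.5f * (q - 2), so { 10, 12, 14, 16 }
    // represents { 4, 5, 6, 7 }; subtracting { 1, 2, 1, 2 } gives { 3, 3, 5, 5 },
    // which the output's scale of 1 and offset of 0 store unchanged.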
5006
5007 return SubtractionTestHelper(workloadFactory,
5008 shape0, input0, 0.5f, 2,
5009 shape1, input1, 1.0f, 0,
5010 shape0, output, 1.0f, 0);
5011}
5012
5013LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
5014{
5015 const unsigned int shape0[] = { 1, 1, 2, 2 };
5016 const unsigned int shape1[] = { 1, 1, 1, 1 };
5017
5018 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
5019 std::vector<uint8_t> input1({ 2 });
5020 std::vector<uint8_t> output({ 5, 6, 7, 8 });
5021
5022 return SubtractionTestHelper(workloadFactory,
5023 shape0, input0, 0.5f, 2,
5024 shape1, input1, 1.0f, 0,
5025 shape0, output, 1.0f, 3);
5026}
5027
5028LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
5029{
5030 const unsigned int shape0[] = { 1, 1, 2, 2 };
5031 const unsigned int shape1[] = { 1, 1, 2, 1 };
5032
5033 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
5034 std::vector<uint8_t> input1({ 2, 1 });
5035 std::vector<uint8_t> output({ 8, 11, 12, 15 });
5036
5037 return SubtractionTestHelper(workloadFactory,
5038 shape0, input0, 1.0f, 0,
5039 shape1, input1, 1.0f, 0,
5040 shape0, output, 1.0f, 0);
5041}
5042
5043LayerTestResult<float, 4> SubtractionTest(armnn::IWorkloadFactory& workloadFactory)
5044{
5045 const unsigned int shape0[] = { 1, 1, 2, 2 };
5046 const unsigned int shape1[] = { 1, 1, 2, 2 };
5047
5048 std::vector<float> input0({ 1, 2, 3, 4 });
5049 std::vector<float> input1({ 1, -1, 0, 2 });
5050 std::vector<float> output({ 0, 3, 3, 2 });
5051
5052 return SubtractionTestHelper(workloadFactory,
5053 shape0, input0, 1.0f, 0,
5054 shape1, input1, 1.0f, 0,
5055 shape0, output, 1.0f, 0);
5056}
5057
5058LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
5059{
5060 const unsigned int shape0[] = { 1, 1, 2, 2 };
5061 const unsigned int shape1[] = { 1, 1, 1, 1 };
5062
5063 std::vector<float> input0({ 1, 2, 3, 4 });
5064 std::vector<float> input1({ 10 });
5065 std::vector<float> output({ -9, -8, -7, -6 });
5066
5067 return SubtractionTestHelper(workloadFactory,
5068 shape0, input0, 1.0f, 0,
5069 shape1, input1, 1.0f, 0,
5070 shape0, output, 1.0f, 0);
5071}
5072
5073LayerTestResult<float, 4> SubtractionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
5074{
5075 const unsigned int shape0[] = { 1, 1, 2, 2 };
5076 const unsigned int shape1[] = { 1, 1, 1, 2 };
5077
5078 std::vector<float> input0({ 1, 2, 3, 4 });
5079 std::vector<float> input1({ 10, -5 });
5080 std::vector<float> output({ -9, 7, -7, 9 });
5081
5082 return SubtractionTestHelper(workloadFactory,
5083 shape0, input0, 1.0f, 0,
5084 shape1, input1, 1.0f, 0,
5085 shape0, output, 1.0f, 0);
5086}
5087
telsoa014fcda012018-03-09 14:13:49 +00005088LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(armnn::IWorkloadFactory& workloadFactory)
5089{
5090 constexpr unsigned int inputWidth = 4;
5091 constexpr unsigned int inputHeight = 4;
5092 constexpr unsigned int inputChannels = 1;
5093 constexpr unsigned int inputBatchSize = 1;
5094
5095 constexpr unsigned int outputWidth = inputWidth;
5096 constexpr unsigned int outputHeight = inputHeight;
5097 constexpr unsigned int outputChannels = inputChannels;
5098 constexpr unsigned int outputBatchSize = inputBatchSize;
5099
5100 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
5101 armnn::DataType::QuantisedAsymm8);
5102 inputTensorInfo.SetQuantizationScale(1.5f);
5103 inputTensorInfo.SetQuantizationOffset(-3);
5104
5105 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
5106 armnn::DataType::QuantisedAsymm8);
5107 outputTensorInfo.SetQuantizationScale(1.5f);
5108 outputTensorInfo.SetQuantizationOffset(-3);
5109
5110 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
5111 1, 2, 3, 4,
5112 2, 3, 4, 5,
5113 3, 4, 5, 6,
5114 4, 5, 6, 7
5115 }));
5116
5117 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
5118 result.outputExpected = input;
5119
5120 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5121 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5122
5123 armnn::ResizeBilinearQueueDescriptor descriptor;
5124 armnn::WorkloadInfo info;
5125 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5126 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5127
5128 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5129
5130 inputHandle->Allocate();
5131 outputHandle->Allocate();
5132 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5133
surmeh013537c2c2018-05-18 16:31:43 +01005134 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00005135 workload->Execute();
5136
5137 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5138 return result;
5139}
5140
5141LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(armnn::IWorkloadFactory& workloadFactory)
5142{
5143 constexpr unsigned int inputWidth = 2;
5144 constexpr unsigned int inputHeight = 2;
5145 constexpr unsigned int inputChannels = 1;
5146 constexpr unsigned int inputBatchSize = 1;
5147
5148 constexpr unsigned int outputWidth = inputWidth / 2;
5149 constexpr unsigned int outputHeight = inputHeight / 2;
5150 constexpr unsigned int outputChannels = inputChannels;
5151 constexpr unsigned int outputBatchSize = inputBatchSize;
5152
5153 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
5154 armnn::DataType::QuantisedAsymm8);
5155 inputTensorInfo.SetQuantizationScale(0.1567f);
5156 inputTensorInfo.SetQuantizationOffset(1);
5157
5158 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
5159 armnn::DataType::QuantisedAsymm8);
5160 outputTensorInfo.SetQuantizationScale(0.1567f);
5161 outputTensorInfo.SetQuantizationOffset(1);
5162
5163 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
5164 1, 255,
5165 200, 250
5166 }));
5167
5168 // The 'resize bilinear' operation projects the top-left corner of each output texel into the input image,
5169 // then figures out the interpolants and weights. Note this differs from projecting the centre of the
telsoa01c577f2c2018-08-31 09:22:23 +01005170 // output texel, so we expect the output 1x1 matrix to contain, as its single element, the value
telsoa014fcda012018-03-09 14:13:49 +00005171 // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
5172 // the centre).
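    // Worked example: the scale factor is inputSize / outputSize = 2 in both dimensions,
    // so output texel (0,0) projects exactly onto input (0,0) and the result is the
    // unweighted value 1. Quantisation is a pass-through here because the input and
    // output share the same scale and offset.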
5173 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
5174 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
5175 1
5176 }));
5177
5178 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5179 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5180
5181 armnn::ResizeBilinearQueueDescriptor descriptor;
5182 armnn::WorkloadInfo info;
5183 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5184 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5185
5186 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5187
5188 inputHandle->Allocate();
5189 outputHandle->Allocate();
5190 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5191
surmeh013537c2c2018-05-18 16:31:43 +01005192 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00005193 workload->Execute();
5194
5195 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5196 return result;
5197}
5198
5199LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
5200{
5201 constexpr unsigned int inputWidth = 4;
5202 constexpr unsigned int inputHeight = 4;
5203 constexpr unsigned int inputChannels = 1;
5204 constexpr unsigned int inputBatchSize = 1;
5205
5206 constexpr unsigned int outputWidth = inputWidth / 2;
5207 constexpr unsigned int outputHeight = inputHeight / 2;
5208 constexpr unsigned int outputChannels = inputChannels;
5209 constexpr unsigned int outputBatchSize = inputBatchSize;
5210
5211 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
5212 armnn::DataType::QuantisedAsymm8);
5213 inputTensorInfo.SetQuantizationScale(3.141592f);
5214 inputTensorInfo.SetQuantizationOffset(3);
5215
5216 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
5217 armnn::DataType::QuantisedAsymm8);
5218 outputTensorInfo.SetQuantizationScale(3.141592f);
5219 outputTensorInfo.SetQuantizationOffset(3);
5220
5221 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
5222 1, 2, 3, 4,
5223 2, 3, 4, 5,
5224 3, 4, 5, 6,
5225 4, 5, 6, 7
5226 }));
5227
5228 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
5229 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
5230 1, 3,
5231 3, 5
5232 }));
5233
5234 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5235 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5236
5237 armnn::ResizeBilinearQueueDescriptor descriptor;
5238 armnn::WorkloadInfo info;
5239 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5240 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5241
5242 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5243
5244 inputHandle->Allocate();
5245 outputHandle->Allocate();
5246 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5247
surmeh013537c2c2018-05-18 16:31:43 +01005248 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00005249 workload->Execute();
5250
5251 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5252 return result;
5253}
5254
5255LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
5256{
5257 constexpr unsigned int inputWidth = 3;
5258 constexpr unsigned int inputHeight = 2;
5259 constexpr unsigned int inputChannels = 1;
5260 constexpr unsigned int inputBatchSize = 1;
5261
5262 constexpr unsigned int outputWidth = 2;
5263 constexpr unsigned int outputHeight = 1;
5264 constexpr unsigned int outputChannels = inputChannels;
5265 constexpr unsigned int outputBatchSize = inputBatchSize;
5266
5267 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
5268 armnn::DataType::QuantisedAsymm8);
5269 inputTensorInfo.SetQuantizationScale(1.5f);
5270 inputTensorInfo.SetQuantizationOffset(-1);
5271
5272 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
5273 armnn::DataType::QuantisedAsymm8);
5274 outputTensorInfo.SetQuantizationScale(1.5f);
5275 outputTensorInfo.SetQuantizationOffset(-1);
5276
5277 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
5278 1, 2, 3, // 3.0, 4.5, 6.0
5279 5, 8, 13 // 9.0, 13.5, 21.0
5280 }));
5281
5282 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
5283 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
5284 1, 3 // 3.0, 5.25
5285 }));
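    // Worked example for the second output element: the horizontal scale factor is
    // inputWidth / outputWidth = 3 / 2 = 1.5, so output x = 1 projects to input x = 1.5.
    // Interpolating halfway between the dequantized values 4.5 and 6.0 gives 5.25, which
    // requantizes to 5.25 / 1.5f + (-1) = 2.5 and rounds to the stored value 3.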
5286
5287 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5288 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5289
5290 armnn::ResizeBilinearQueueDescriptor descriptor;
5291 armnn::WorkloadInfo info;
5292 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5293 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5294
5295 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5296
5297 inputHandle->Allocate();
5298 outputHandle->Allocate();
5299
5300 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5301
surmeh013537c2c2018-05-18 16:31:43 +01005302 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00005303 workload->Execute();
5304
5305 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5306 return result;
5307}
5308
5309LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(armnn::IWorkloadFactory& workloadFactory)
5310{
5311 constexpr unsigned int inputWidth = 2;
5312 constexpr unsigned int inputHeight = 3;
5313 constexpr unsigned int inputChannels = 1;
5314 constexpr unsigned int inputBatchSize = 1;
5315
5316 constexpr unsigned int outputWidth = 5;
5317 constexpr unsigned int outputHeight = 3;
5318 constexpr unsigned int outputChannels = inputChannels;
5319 constexpr unsigned int outputBatchSize = inputBatchSize;
5320
5321 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
5322 armnn::DataType::QuantisedAsymm8);
5323 inputTensorInfo.SetQuantizationScale(0.010765f);
5324 inputTensorInfo.SetQuantizationOffset(7);
5325
5326 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
5327 armnn::DataType::QuantisedAsymm8);
5328 outputTensorInfo.SetQuantizationScale(0.010132f);
5329 outputTensorInfo.SetQuantizationOffset(-18);
5330
5331 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
5332 24, 228, // 0.183005, 2.379065,
5333 105, 128, // 1.05497, 1.302565
5334 230, 71 // 2.400595, 0.68896
5335 }));
5336
5337 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
5338 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
5339 0, 87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
5340 86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
5341 219, 151, 84, 50, 50 // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
5342 }));
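    // Worked example for output element (0,1): the horizontal scale factor is
    // inputWidth / outputWidth = 2 / 5 = 0.4, so output x = 1 projects to input x = 0.4.
    // Interpolating gives 0.183005 + 0.4 * (2.379065 - 0.183005) = 1.061429, matching the
    // comment above; outputs at x >= 3 project beyond the last input column and clamp to it.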
5343
5344 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5345 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5346
5347 armnn::ResizeBilinearQueueDescriptor descriptor;
5348 armnn::WorkloadInfo info;
5349 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5350 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5351
5352 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5353
5354 inputHandle->Allocate();
5355 outputHandle->Allocate();
5356 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5357
surmeh013537c2c2018-05-18 16:31:43 +01005358 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00005359 workload->Execute();
5360
5361 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5362 return result;
5363}
5364
5365LayerTestResult<float, 4> BatchNormTest(armnn::IWorkloadFactory& workloadFactory)
5366{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01005367 // BatchSize: 1
5368 // Channels: 2
5369 // Height: 3
5370 // Width: 2
5371
5372 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
5373 std::vector<float> inputValues
5374 {
5375 // Batch 0, Channel 0, Height (3) x Width (2)
5376 1.f, 4.f,
5377 4.f, 2.f,
5378 1.f, 6.f,
5379
5380 // Batch 0, Channel 1, Height (3) x Width (2)
5381 1.f, 1.f,
5382 4.f, 1.f,
5383 -2.f, 4.f
5384 };
5385 std::vector<float> expectedOutputValues
5386 {
5387 // Batch 0, Channel 0, Height (3) x Width (2)
5388 1.f, 4.f,
5389 4.f, 2.f,
5390 1.f, 6.f,
5391
5392 // Batch 0, Channel 1, Height (3) x Width (2)
5393 3.f, 3.f,
5394 4.f, 3.f,
5395 2.f, 4.f
5396 };
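    // The expected values follow the standard batch normalization transform, evaluated
    // per channel: y = gamma * (x - mean) / sqrt(variance + epsilon) + beta. The mean,
    // variance, beta and gamma constants are supplied inside BatchNormTestImpl, which is
    // why channel 0 passes through unchanged while channel 1 is remapped.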
5397
5398 return BatchNormTestImpl<float>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
5399 0.f, 0, armnn::DataLayout::NCHW);
5400}
5401
5402LayerTestResult<float, 4> BatchNormNhwcTest(armnn::IWorkloadFactory& workloadFactory)
5403{
5404 // BatchSize: 1
5405 // Height: 3
5406 // Width: 2
5407 // Channels: 2
5408
5409 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
5410 std::vector<float> inputValues
5411 {
5412 // Batch 0, Height 0, Width (2) x Channel (2)
5413 1.f, 1.f,
5414 4.f, 1.f,
5415
5416 // Batch 0, Height 1, Width (2) x Channel (2)
5417 4.f, 4.f,
5418 2.f, 1.f,
5419
5420 // Batch 0, Height 2, Width (2) x Channel (2)
5421 1.f, -2.f,
5422 6.f, 4.f
5423 };
5424 std::vector<float> expectedOutputValues
5425 {
5426 // Batch 0, Height 0, Width (2) x Channel (2)
5427 1.f, 3.f,
5428 4.f, 3.f,
5429
5430 // Batch 0, Height 1, Width (2) x Channel (2)
5431 4.f, 4.f,
5432 2.f, 3.f,
5433
5434 // Batch 0, Height 2, Width (2) x Channel (2)
5435 1.f, 2.f,
5436 6.f, 4.f
5437 };
5438
5439 return BatchNormTestImpl<float>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
5440 0.f, 0, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00005441}
5442
5443LayerTestResult<uint8_t, 4> BatchNormUint8Test(armnn::IWorkloadFactory& workloadFactory)
5444{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01005445 // BatchSize: 1
5446 // Channels: 2
5447 // Height: 3
5448 // Width: 2
5449
5450 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
5451 std::vector<float> inputValues
5452 {
5453 // Batch 0, Channel 0, Height (3) x Width (2)
5454 1.f, 4.f,
5455 4.f, 2.f,
5456 1.f, 6.f,
5457
5458 // Batch 0, Channel 1, Height (3) x Width (2)
5459 1.f, 1.f,
5460 4.f, 1.f,
5461 -2.f, 4.f
5462 };
5463 std::vector<float> expectedOutputValues
5464 {
5465 // Batch 0, Channel 0, Height (3) x Width (2)
5466 1.f, 4.f,
5467 4.f, 2.f,
5468 1.f, 6.f,
5469
5470 // Batch 0, Channel 1, Height (3) x Width (2)
5471 3.f, 3.f,
5472 4.f, 3.f,
5473 2.f, 4.f
5474 };
5475
5476 return BatchNormTestImpl<uint8_t>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
5477 1.f/20.f, 50, armnn::DataLayout::NCHW);
5478}
5479
5480LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(armnn::IWorkloadFactory& workloadFactory)
5481{
5482 // BatchSize: 1
5483 // Height: 3
5484 // Width: 2
5485 // Channels: 2
5486
5487 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
5488 std::vector<float> inputValues
5489 {
5490 // Batch 0, Height 0, Width (2) x Channel (2)
5491 1.f, 1.f,
5492 4.f, 1.f,
5493
5494 // Batch 0, Height 1, Width (2) x Channel (2)
5495 4.f, 4.f,
5496 2.f, 1.f,
5497
5498 // Batch 0, Height 2, Width (2) x Channel (2)
5499 1.f, -2.f,
5500 6.f, 4.f
5501 };
5502 std::vector<float> expectedOutputValues
5503 {
5504 // Batch 0, Height 0, Width (2) x Channel (2)
5505 1.f, 3.f,
5506 4.f, 3.f,
5507
5508 // Batch 0, Height 1, Width (2) x Channel (2)
5509 4.f, 4.f,
5510 2.f, 3.f,
5511
5512 // Batch 0, Height 2, Width (2) x Channel (2)
5513 1.f, 2.f,
5514 6.f, 4.f
5515 };
5516
5517 return BatchNormTestImpl<uint8_t>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
5518 1.f/20.f, 50, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00005519}
5520
5521LayerTestResult<uint8_t, 4> ConstantUint8Test(armnn::IWorkloadFactory& workloadFactory)
5522{
5523 return ConstantTestImpl<uint8_t>(workloadFactory, 2e-6f, 1);
5524}
5525
5526LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(armnn::IWorkloadFactory& workloadFactory)
5527{
5528 return Concatenation1dTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5529}
5530
5531LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5532{
5533 return Concatenation2dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5534}
5535
5536LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5537{
5538 return Concatenation2dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5539}
5540
5541LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
5542{
5543 return Concatenation2dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5544}
5545
5546LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
5547{
5548 return Concatenation2dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5549}
5550
5551LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5552{
5553 return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5554}
5555
5556LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5557{
5558 return Concatenation3dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5559}
5560
5561LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5562{
5563 return Concatenation3dDim2TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5564}
5565
5566LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
5567{
5568 return Concatenation3dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5569}
5570
5571LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
5572{
5573 return Concatenation3dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5574}
5575
5576LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
5577{
5578 return Concatenation3dDim2DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5579}
5580
5581LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
5582 bool forceNoPadding)
5583{
5584 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
5585}
5586
5587LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory& workloadFactory,
5588 bool forceNoPadding)
5589{
5590 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<uint8_t>(workloadFactory, forceNoPadding, 3.0f, -5);
5591}
5592
5593LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory& workloadFactory,
5594 bool forceNoPadding)
5595{
5596 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<float>(workloadFactory, forceNoPadding);
5597}
5598
5599LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory& workloadFactory,
5600 bool forceNoPadding)
5601{
5602 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<uint8_t>(workloadFactory, forceNoPadding, 0.1f, 128);
5603}
5604
James Conroy45a9b772018-10-31 11:47:53 +00005605LayerTestResult<float, 4> SimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory,
5606 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005607{
James Conroy45a9b772018-10-31 11:47:53 +00005608 return SimpleMaxPooling2dTestCommon<float>(workloadFactory, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00005609}
5610
James Conroy45a9b772018-10-31 11:47:53 +00005611LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
5612 const armnn::DataLayoutIndexed& dataLayout)
Francis Murtagh043d0d02018-10-05 14:08:48 +01005613{
James Conroy45a9b772018-10-31 11:47:53 +00005614 return SimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, dataLayout);
Francis Murtagh043d0d02018-10-05 14:08:48 +01005615}
5616
James Conroy45a9b772018-10-31 11:47:53 +00005617LayerTestResult<float, 4> SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory,
5618 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005619{
James Conroy45a9b772018-10-31 11:47:53 +00005620 return SimpleAveragePooling2dTestCommon<float>(workloadFactory, dataLayout);
James Conroy69482272018-10-19 10:41:35 +01005621}
5622
James Conroy45a9b772018-10-31 11:47:53 +00005623LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
5624 const armnn::DataLayoutIndexed& dataLayout)
James Conroy69482272018-10-19 10:41:35 +01005625{
James Conroy45a9b772018-10-31 11:47:53 +00005626 return SimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory, dataLayout, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00005627}
5628
surmeh01bceff2f2018-03-29 16:29:27 +01005629LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
5630 bool forceNoPadding)
5631{
5632 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
5633}
5634
telsoa014fcda012018-03-09 14:13:49 +00005635LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
5636{
5637 return LargeTensorsAveragePooling2dTestCommon<float>(workloadFactory);
5638}
5639
5640LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
5641{
5642 return LargeTensorsAveragePooling2dTestCommon<uint8_t>(workloadFactory, 0.5f, -1);
5643}
5644
James Conroy45a9b772018-10-31 11:47:53 +00005645LayerTestResult<float, 4> SimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory,
5646 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005647{
James Conroy45a9b772018-10-31 11:47:53 +00005648 return SimpleL2Pooling2dTestCommon<float>(workloadFactory, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00005649}
5650
James Conroy45a9b772018-10-31 11:47:53 +00005651LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
5652 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005653{
James Conroy45a9b772018-10-31 11:47:53 +00005654 return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00005655}
5656
5657LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory& workloadFactory)
5658{
5659 return L2Pooling2dSize3Stride1TestCommon<float>(workloadFactory);
5660}
5661
5662LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5663{
5664 return L2Pooling2dSize3Stride1TestCommon<uint8_t>(workloadFactory);
5665}
5666
5667LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory& workloadFactory)
5668{
5669 return L2Pooling2dSize3Stride3TestCommon<float>(workloadFactory);
5670}
5671
5672LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5673{
5674 return L2Pooling2dSize3Stride3TestCommon<uint8_t>(workloadFactory);
5675}
5676
5677LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory& workloadFactory)
5678{
5679 return L2Pooling2dSize3Stride4TestCommon<float>(workloadFactory);
5680}
5681
5682LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5683{
5684 return L2Pooling2dSize3Stride4TestCommon<uint8_t>(workloadFactory);
5685}
5686
5687LayerTestResult<float, 4> L2Pooling2dSize7Test(armnn::IWorkloadFactory& workloadFactory)
5688{
5689 return L2Pooling2dSize7TestCommon<float>(workloadFactory);
5690}
5691
5692LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5693{
5694 return L2Pooling2dSize7TestCommon<uint8_t>(workloadFactory);
5695}
5696
5697LayerTestResult<float, 4> L2Pooling2dSize9Test(armnn::IWorkloadFactory& workloadFactory)
5698{
5699 return L2Pooling2dSize9TestCommon<float>(workloadFactory);
5700}
5701
5702LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5703{
5704 return L2Pooling2dSize9TestCommon<uint8_t>(workloadFactory);
5705}
5706
5707LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
5708{
5709 return AsymmetricNonSquarePooling2dTestCommon<float>(workloadFactory);
5710}
5711
5712LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
5713{
5714 return AsymmetricNonSquarePooling2dTestCommon<uint8_t>(workloadFactory);
5715}
5716
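// The Compare tests run the same pooling workload through the factory under test and through a
// reference factory, then check that the two outputs match; see ComparePooling2dTestCommon for
// the comparison details.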
LayerTestResult<float, 4> ComparePooling2dTest(armnn::IWorkloadFactory& workloadFactory,
                                               armnn::IWorkloadFactory& refWorkloadFactory,
                                               armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<float>(workloadFactory, refWorkloadFactory, poolingType);
}

LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                      armnn::IWorkloadFactory& refWorkloadFactory,
                                                      armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<uint8_t>(workloadFactory, refWorkloadFactory, poolingType, 0.1f, 128);
}

LayerTestResult<float, 2> FullyConnectedLargeTest(armnn::IWorkloadFactory& workloadFactory,
                                                  bool transposeWeights)
{
    return FullyConnectedLargeTestCommon<float>(workloadFactory, transposeWeights);
}

LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, 1.0f, -5);
}

LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<uint8_t>(workloadFactory, 1.0f, -5);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> SimplePermuteFloat32Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimplePermuteFloat32TestCommon(workloadFactory);
}

LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimplePermuteUint8TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet1TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet2TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet3TestCommon(workloadFactory);
}

namespace
{

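// Builds and runs a Mean workload over inputData and compares the result against outputData.
// InputDim/OutputDim are the input and output tensor ranks. The same quantization scale and offset
// are applied to both the input and the output tensor info, so for uint8_t tests the expected
// values can be written directly in the quantized domain.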
template <typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> MeanTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                             const unsigned int* inputShape,
                                             const std::vector<T>& inputData,
                                             const std::vector<unsigned int>& axis,
                                             bool keepDims,
                                             const unsigned int* outputShape,
                                             const std::vector<T>& outputData,
                                             float scale = 1.0f,
                                             int32_t offset = 0)
{
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MeanQueueDescriptor data;
    data.m_Parameters.m_Axis = axis;
    data.m_Parameters.m_KeepDims = keepDims;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());

    return result;
}

} // anonymous namespace

LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 3, 2 };
    const unsigned int outputShape[] = { 1 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2 });

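    // An empty axis list means "reduce over every dimension", so the single output value is the
    // mean of all six inputs: (1 + 1 + 2 + 2 + 3 + 3) / 6 = 2.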
    return MeanTestHelper<uint8_t, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
}

LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 2 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2, 2 });

    return MeanTestHelper<uint8_t, 4, 3>(workloadFactory, inputShape, input, { 2 }, false, outputShape, output);
}

LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2, 2 });

    return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, { 2 }, true, outputShape, output);
}

LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 1, 3, 1, 1 };

    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
    std::vector<uint8_t> output({ 1, 3, 5 });

    return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, { 0, 3 }, true, outputShape, output);
}

LayerTestResult<uint8_t, 1> MeanVtsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 2 };

    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                                 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 });
    std::vector<uint8_t> output({ 12, 13 });

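    // Input and output share the quantization parameters (scale 0.8, offset 5), so the mean can be
    // checked directly on the quantized values: reducing over axes {0, 1} averages the odd entries
    // (1, 3, ..., 23) to 12 and the even entries (2, 4, ..., 24) to 13.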
    return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, inputShape, input, { 0, 1 }, false, outputShape,
                                         output, 0.8f, 5);
}

LayerTestResult<float, 1> MeanFloatSimpleTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 3, 2 };
    const unsigned int outputShape[] = { 1 };

    std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
    std::vector<float> output({ 2.0f });

    return MeanTestHelper<float, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
}

LayerTestResult<float, 3> MeanFloatSimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 3, 1, 2 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
    std::vector<float> output({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });

    return MeanTestHelper<float, 4, 3>(workloadFactory, inputShape, input, { 0 }, false, outputShape, output);
}

LayerTestResult<float, 4> MeanFloatKeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };

    std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
    std::vector<float> output({ 2.0f, 2.0f });

    return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, { 2 }, true, outputShape, output);
}

LayerTestResult<float, 4> MeanFloatMultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 1, 3, 1, 1 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
    std::vector<float> output({ 1.5f, 3.5f, 5.5f });

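    // Reducing over the batch and width axes {0, 3} leaves one mean per channel,
    // e.g. channel 0: (1 + 2 + 1 + 2) / 4 = 1.5.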
    return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, { 0, 3 }, true, outputShape, output);
}

LayerTestResult<float, 1> MeanVtsFloat1Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 2 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
                               13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
    std::vector<float> output({ 12.0f, 13.0f });

    return MeanTestHelper<float, 3, 1>(workloadFactory, inputShape, input, { 0, 1 }, false, outputShape, output);
}

LayerTestResult<float, 3> MeanVtsFloat2Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 1, 3, 1 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
                               13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
    std::vector<float> output({ 10.5f, 12.5f, 14.5f });

    return MeanTestHelper<float, 3, 3>(workloadFactory, inputShape, input, { 0, 2 }, true, outputShape, output);
}

LayerTestResult<float, 3> MeanVtsFloat3Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 2, 2, 1 };
    const unsigned int outputShape[] = { 1, 2, 1 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f });
    std::vector<float> output({ 1.5f, 3.5f });

    return MeanTestHelper<float, 4, 3>(workloadFactory, inputShape, input, { 2 }, false, outputShape, output);
}

LayerTestResult<float, 4> AdditionAfterMaxPoolTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Create the initial tensor:
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<float>());
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<float>());

    boost::multi_array<float, 4> poolingInput = MakeTensor<float, 4>(poolingInputTensorInfo,
                                                                     {1, 2, 3,
                                                                      4, 5, 6,
                                                                      7, 8, 9
                                                                     });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply a MaxPool with poolSize = 1x1 and stride = 2x2.
    // Result =
    // 1, 3
    // 7, 9
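    // (A 1x1 pool with a 2x2 stride simply samples the input at positions (0,0), (0,2), (2,0) and (2,2).)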
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool workload.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    auto shape(GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);

    // Create an addition with another tensor of the same size. This happens to be the result of
    // applying a Conv2d with a 2x2 kernel of ones and a 1x1 stride to the initial tensor:
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<float>());
    armnn::TensorInfo addOutputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<float>());

    boost::multi_array<float, 4> addInput = MakeTensor<float, 4>(addInputTensorInfo,
                                                                 {12, 16,
                                                                  24, 28
                                                                 });

    // Expected output tensor after MaxPool and Addition.
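    // Element-wise: {1, 3, 7, 9} + {12, 16, 24, 28} = {13, 19, 31, 37}.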
    LayerTestResult<float, 4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
            {
                13, 19,
                31, 37
            }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor.
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

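    // Upload the inputs. Note that at this point neither workload has executed, so the round trip
    // of the pooling output through resultMaxPool re-uploads data that is not yet meaningful; the
    // real data flow happens when the two workloads execute below.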
    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    workload->Execute();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    workloadFactory.Finalize();

    return addRet;
}