//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "LayerTests.hpp"

#include "test/TensorHelpers.hpp"
#include "TensorCopyUtils.hpp"
#include "Permute.hpp"

#include <boost/test/unit_test.hpp>
#include <boost/assert.hpp>

#include <armnn/LayerSupport.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <algorithm>
#include <boost/cast.hpp>

#include "WorkloadTestUtils.hpp"
#include "Conv2dTestImpl.hpp"
#include "BatchNormTestImpl.hpp"
#include "ActivationTestImpl.hpp"
#include "Pooling2dTestImpl.hpp"
#include "ReshapeTestImpl.hpp"
#include "FullyConnectedTestImpl.hpp"
#include "SplitterTestImpl.hpp"
#include "SoftmaxTestImpl.hpp"
#include "NormTestImpl.hpp"
#include "PermuteTestImpl.hpp"
#include "LstmTestImpl.hpp"
#include "ConvertFp16ToFp32TestImpl.hpp"
#include "ConvertFp32ToFp16TestImpl.hpp"
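// Note: the functions in this file are not Boost test cases themselves; backend-specific
// test suites wrap them. A sketch of typical usage (assuming the ARMNN_AUTO_TEST_CASE
// macro used by the ArmNN backend unit tests):
//
//     ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x5, SimpleConvolution2d3x5Test,
//                          true,                      // biasEnabled
//                          armnn::DataLayout::NCHW);  // layout
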
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});

// 2-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias2({0, 2});

// Helper function that returns either Bias2 or an empty vector, depending on whether bias is enabled.
template<typename T>
boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
{
    if (biasEnabled)
    {
        armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, armnn::GetDataType<T>());
        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
        return bias;
    }
    else
    {
        return boost::multi_array<T, 1>();
    }
}
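
// The test data throughout this file goes through QuantizedVector<T>, which for the
// quantized T specialisations maps each real value v to essentially round(v / qScale) + qOffset
// and passes float through unchanged. For example, with qScale = 0.5f and qOffset = 50
// (the parameters the Uint8 convolution tests below use), a real 1.0f is stored as 52.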

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       float qScale,
                                                       int32_t qOffset,
                                                       bool biasEnabled,
                                                       const armnn::DataLayoutIndexed& layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x4 image: with no padding and stride 1,
    // the output is (16 - 3 + 1) = 14 wide and (8 - 5 + 1) = 4 high.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout);
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       float qScale,
                                                       int32_t qOffset,
                                                       bool biasEnabled,
                                                       const armnn::DataLayoutIndexed& layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout);
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                           float qScale,
                                                           int32_t qOffset,
                                                           bool biasEnabled,
                                                           armnn::DataLayout dataLayout)
{
    // Use a single-batch, 1-channel 4x3 input image, laid out in NHWC.
    armnn::TensorInfo inputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
                                                      {
                                                          1, 5, 2, 3,
                                                          8, 7, 3, 6,
                                                          3, 3, 9, 1
                                                      });

    // Use 1 batch of a 1-channel 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
                                                          4, 5, 6,
                                                          0, 0, 0,
                                                          3, 2, 1
                                                      });

    // Expected output is 1 batch of a 1-channel 4x3 image, the same size as the input.
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());

    const std::vector<float> outputData =
    {
        23, 41, 33, 21,
        44, 65, 76, 52,
        82, 85, 79, 42
    };
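
    // Spot check of the first element, assuming SAME zero-padding with stride 1 (which is
    // what keeps the output the same size as the input): at the top-left position only the
    // bottom kernel row (3, 2, 1) overlaps valid input, giving 3*0 + 2*8 + 1*7 = 23.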

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    return SimpleConvolution2dNhwcTestImpl<T>(workloadFactory,
                                              input,
                                              kernel,
                                              boost::multi_array<T, 1>(),
                                              expectedOutput,
                                              dataLayout,
                                              qScale,
                                              qOffset);
}

LayerTestResult<float, 4> SimpleConvolution2d3x5Test(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled,
                                                     const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2d3x5TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, layout);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled,
                                                            const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2d3x5TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled, layout);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3Test(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled,
                                                     const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2d3x3TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, layout);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(armnn::IWorkloadFactory& workloadFactory,
                                                         bool biasEnabled)
{
    return SimpleConvolution2d3x3NhwcTestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled,
                                                            const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2d3x3TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled, layout);
}

template<typename T>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::DataLayoutIndexed& layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11, 21, 31,
            12, 22, 32,
            13, 23, 33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11, -21,
            -12, -22,
        })));

// Expected output is 1 batch of a 1-channel 6x8 image.
// Manually calculated like this:
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
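// With stride 1, outputWidth = inputWidth + padLeft + padRight - kernelWidth + 1 = 3 + 1 + 3 - 2 + 1 = 6,
// and outputHeight = 3 + 2 + 4 - 2 + 1 = 8, matching the {1, 1, 8, 6} shape below.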
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            0, 0, 0, 0, 0, 0,
            -242, -594, -934, -372, 0, 0,
            -495, -1190, -1850, -725, 0, 0,
            -538, -1256, -1916, -748, 0, 0,
            -273, -626, -946, -363, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout,
                                          1,  // Padding left.
                                          2,  // Padding top.
                                          3,  // Padding right.
                                          4); // Padding bottom.
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                     const armnn::DataLayoutIndexed& layout,
                                                                     float qScale,
                                                                     int32_t qOffset)
{
    // Use a single-batch 1-channel 5x5 image as input.
    armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11, 21, 31, 41, 51,
            12, 22, 32, 42, 52,
            13, 23, 33, 43, 53,
            14, 24, 34, 44, 54,
            15, 25, 35, 45, 55,
        })));

    // Use 1 batch of a 1-channel 4x4 kernel.
    armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11, -21, -31, -41,
            -12, -22, -32, -42,
            -13, -23, -33, -43,
            -14, -24, -34, -44,
        })));

    // Expected output is 1 batch of a 1-channel 5x5 image.
    armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
    std::vector<T> myVec(outputDesc.GetNumElements(), 0);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -7140, -10580, -13940, -9300, -5230,
            -9590, -14120, -18520, -12290, -6860,
            -9980, -14560, -18960, -12560, -7000,
            -7518, -10904, -14144, -9318, -5152,
            -5032, -7256, -9376, -6142, -3368,
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout,
                                          1,  // Padding left.
                                          1,  // Padding top.
                                          2,  // Padding right.
                                          2); // Padding bottom.
}

template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                 float qScale,
                                                                 int32_t qOffset,
                                                                 bool biasEnabled,
                                                                 const armnn::DataLayoutIndexed& layout)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            0, 1, 2, 3, 4,
            5, 6, 7, 8, 9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10, 9,
            8, 7, 6, 5,
            4, 3, 2, 1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the python tensorflow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,
            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));
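
    // Spot check, assuming cross-correlation with one pixel of zero padding at the top and
    // left: the first channel-0 output, 1062, is the lower-right 3x3 of the channel-0 kernel
    // applied to input rows 0-2, cols 0-2:
    //   (27*0 + 26*1 + 25*2) + (23*5 + 22*6 + 21*7) + (19*10 + 18*11 + 17*12) = 1062.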

    return DepthwiseConvolution2dAsymmetricTestImpl<T>(workloadFactory,
                                                       input,
                                                       kernel,
                                                       GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                                       expectedOutput,
                                                       qScale,
                                                       qOffset,
                                                       layout,
                                                       1,  // Padding left.
                                                       1,  // Padding top.
                                                       2,  // Padding right.
                                                       2,  // Padding bottom.
                                                       1,  // strideX
                                                       1); // strideY
}

template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                           float qScale,
                                                           int32_t qOffset,
                                                           bool biasEnabled)
{
    armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            0, 25,
            1, 26,
            2, 27,
            3, 28,
            4, 29,

            5, 30,
            6, 31,
            7, 32,
            8, 33,
            9, 34,

            10, 35,
            11, 36,
            12, 37,
            13, 38,
            14, 39,

            15, 40,
            16, 41,
            17, 42,
            18, 43,
            19, 44,

            20, 45,
            21, 46,
            22, 47,
            23, 48,
            24, 49
        })));

    armnn::TensorInfo kernelTensorInfo({ 1, 4, 4, 2 }, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 16,
            31, 15,
            30, 14,
            29, 13,

            28, 12,
            27, 11,
            26, 10,
            25, 9,

            24, 8,
            23, 7,
            22, 6,
            21, 5,

            20, 4,
            19, 3,
            18, 2,
            17, 1
        })));

    armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1550,
            1580, 2284,
            1850, 2362,
            1530, 1955,
            1117, 1428,

            2140, 2910,
            3108, 4206,
            3500, 4342,
            2842, 3528,
            2042, 2536,

            3580, 3390,
            5068, 4886,
            5460, 5022,
            4342, 4068,
            3062, 2916,

            3618, 3566,
            5072, 5056,
            5390, 5182,
            4248, 4133,
            2971, 2922,

            3074, 3100,
            4282, 4352,
            4510, 4452,
            3533, 3517,
            2457, 2465
        })));
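
    // Same data as DepthwiseConvolution2dAsymmetricTestCommon above, permuted from NCHW to
    // NHWC: each spatial position now holds a (channel0, channel1) pair, so the first pair
    // (1062, 1550) interleaves the first elements of the two NCHW channel planes.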

    return DepthwiseConvolution2dNhwcTestImpl<T>(workloadFactory,
                                                 input,
                                                 kernel,
                                                 GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                                 expectedOutput,
                                                 qScale,
                                                 qOffset,
                                                 1,  // Padding left.
                                                 1,  // Padding top.
                                                 2,  // Padding right.
                                                 2,  // Padding bottom.
                                                 1,  // strideX
                                                 1); // strideY
}

LayerTestResult<float, 4>
Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(armnn::IWorkloadFactory& workloadFactory,
                                                           const armnn::DataLayoutIndexed& layout)
{
    return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon<float>(workloadFactory, layout, 0.0f, 0);
}

LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(armnn::IWorkloadFactory& workloadFactory,
                                                             const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2dAsymmetricPaddingTestCommon<float>(workloadFactory, layout, 0.0f, 0);
}

LayerTestResult<float, 4> DepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled,
                                                     const armnn::DataLayoutIndexed& layout)
{
    return DepthwiseConvolution2dTestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled, layout);
}

LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(armnn::IWorkloadFactory& workloadFactory,
                                                              bool biasEnabled)
{
    return DepthwiseConvolution2dNhwcTestCommon<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(armnn::IWorkloadFactory& workloadFactory,
                                                              bool biasEnabled,
                                                              const armnn::DataLayoutIndexed& layout)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled, layout);
}

LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(armnn::IWorkloadFactory& workloadFactory,
                                                               bool biasEnabled,
                                                               const armnn::DataLayoutIndexed& layout)
{
    return DepthwiseConvolution2dAsymmetricTestCommon<float>(workloadFactory, 0.0f, 0, biasEnabled, layout);
}

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled,
                                                            const armnn::DataLayoutIndexed& layout)
{
    return DepthwiseConvolution2dTestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled, layout);
}

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                                     bool biasEnabled,
                                                                     const armnn::DataLayoutIndexed& layout)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled, layout);
}

LayerTestResult<float, 4> Convolution1dTest(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
{
    return Convolution1dTestImpl<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> Convolution1dUint8Test(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
{
    return Convolution1dTestImpl<uint8_t>(workloadFactory, 0.1f, 128, biasEnabled);
}

LayerTestResult<float, 4> CompareConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                   armnn::IWorkloadFactory& refWorkloadFactory)
{
    return CompareConvolution2dTestImpl<float>(workloadFactory, refWorkloadFactory);
}

template<typename T>
LayerTestResult<T, 4> CompareDepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                        armnn::IWorkloadFactory& refWorkloadFactory,
                                                        const armnn::DataLayoutIndexed& layout)
{
    return CompareDepthwiseConvolution2dTestImpl<T>(workloadFactory, refWorkloadFactory, layout);
}

template LayerTestResult<float, 4> CompareDepthwiseConvolution2dTest<float>(
    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&, const armnn::DataLayoutIndexed&);
template LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dTest<uint8_t>(
    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&, const armnn::DataLayoutIndexed&);

LayerTestResult<float, 4> SimpleNormalizationAcrossTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float, 4> SimpleNormalizationWithinTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float, 4> SimpleNormalizationAcrossNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationNhwcTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float, 2> SimpleSoftmaxTest(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    return SimpleSoftmaxTestImpl<float>(workloadFactory, beta);
}

LayerTestResult<uint8_t, 2> SimpleSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    return SimpleSoftmaxTestImpl<uint8_t>(workloadFactory, beta);
}

LayerTestResult<float, 4> CompareNormalizationTest(armnn::IWorkloadFactory& workloadFactory,
                                                   armnn::IWorkloadFactory& refWorkloadFactory,
                                                   armnn::NormalizationAlgorithmChannel normChannel,
                                                   armnn::NormalizationAlgorithmMethod normMethod)
{
    return CompareNormalizationTestImpl(workloadFactory, refWorkloadFactory, normChannel, normMethod);
}

LayerTestResult<float, 2> CompareSoftmaxTest(armnn::IWorkloadFactory& workloadFactory,
                                             armnn::IWorkloadFactory& refWorkloadFactory,
                                             float beta)
{
    return CompareSoftmaxTestImpl<float>(workloadFactory, refWorkloadFactory, beta);
}

LayerTestResult<uint8_t, 2> CompareSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                    armnn::IWorkloadFactory& refWorkloadFactory,
                                                    float beta)
{
    return CompareSoftmaxTestImpl<uint8_t>(workloadFactory, refWorkloadFactory, beta);
}

std::vector<LayerTestResult<float, 3>> SplitterTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SplitterTestCommon<float>(workloadFactory);
}

std::vector<LayerTestResult<uint8_t, 3>> SplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SplitterTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<float, 3> CopyViaSplitterTest(armnn::IWorkloadFactory& workloadFactory)
{
    return CopyViaSplitterTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return CopyViaSplitterTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
}

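// The LSTM tests below cover three layer variants: CIFG (coupled input and forget gate)
// with peephole connections and no projection, the full non-CIFG cell with both peephole
// and projection weights, and the basic cell with neither.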
LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({ 2, 2 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        { 2., 3., 3., 4. }));

    armnn::TensorInfo outputDesc({ 2, 4 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
         -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
         0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
         -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
         -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
         0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
         -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
         0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
         0.02168f}));
    return LstmLayerFloat32NoCifgWithPeepholeWithProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({2, 2}, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        {2., 3., 3., 4.}));

    armnn::TensorInfo outputDesc({2, 4}, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
          -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 3> MergerTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float, 3> ret(outputTensorInfo);

    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,

            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,
        })
    );

    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
        {
            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

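    // The merger concatenates along the channel dimension: input1 fills channels 0-1 and
    // input2 fills channel 2 of the 3-channel output, which is what the two view origins
    // below encode.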
    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; // Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<float, 4> AdditionTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 2;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
        {
            0.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            1.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 1.0f, 2.0f,

            0.0f, 0.0f, 1.0f,
            0.2f, 1.0f, 2.0f,
        }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
        {
            1.0f, 2.0f, 1.0f,
            0.0f, 1.0f, 2.0f,

            1.0f, 2.0f, -2.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 0.0f, -3.0f,

            0.0f, 0.0f, 1.0f,
            0.7f, 1.0f, 5.0f,
        }));

    LayerTestResult<float, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 4.0f, 2.0f,
            0.2f, 2.0f, 4.0f,

            2.0f, 4.0f, -1.0f,
            0.4f, 2.0f, 4.0f,

            0.0f, 4.0f, 2.0f,
            8.4f, 1.0f, -1.0f,

            0.0f, 0.0f, 2.0f,
            0.9f, 2.0f, 7.0f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template <typename T>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                float qScale,
                                                int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

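    // Shapes {1,3,2,1} and {1,1,2,3} broadcast to {1,3,2,3}: every dimension of size 1 is
    // stretched to match the other operand, so each single-element row of input1 is added
    // across a full 3-element row of input2.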
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template <typename T>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                        float qScale,
                                                        int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f, 1.0f, 2.0f,
            3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f,
            9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
            6.5f, 7.5f, 8.5f,
            9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<float, 4> AdditionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcastTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcastTestImpl<uint8_t>(workloadFactory, 2.f, 0);
}

LayerTestResult<float, 4> AdditionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcast1ElementTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcast1ElementTestImpl<uint8_t>(workloadFactory, 0.1333333f, 128);
}

LayerTestResult<float, 4> CompareAdditionTest(armnn::IWorkloadFactory& workloadFactory,
                                              armnn::IWorkloadFactory& refWorkloadFactory)
{
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}

namespace {
template <typename T>
LayerTestResult<T, 4> DivisionTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                         const unsigned int shape0[4],
                                         const std::vector<T>& values0,
                                         float scale0,
                                         int32_t offset0,
                                         const unsigned int shape1[4],
                                         const std::vector<T>& values1,
                                         float scale1,
                                         int32_t offset1,
                                         const unsigned int outShape[4],
                                         const std::vector<T>& outValues,
                                         float outScale,
                                         int32_t outOffset)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DivisionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
1328
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001329LayerTestResult<float,4> DivisionByZeroTest(armnn::IWorkloadFactory& workloadFactory)
1330{
1331 const unsigned int width = 2;
1332 const unsigned int height = 2;
1333 const unsigned int channelCount = 2;
1334 const unsigned int batchSize = 2;
1335
1336 unsigned int shape[] = { batchSize, channelCount, height, width };
1337
1338 std::vector<float> input0({
1339 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
1340 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
1341
1342 std::vector<float> input1({
1343 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
1344 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
1345
1346 std::vector<float> output({
1347 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
1348 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
1349
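    // Division follows IEEE 754 semantics: x / +/-0 is +/-infinity (signed by the two
    // operands) for non-zero x, and 0 / 0 yields NaN; 5 / 5 checks the finite case.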
David Beck5cd01f32018-09-12 16:00:08 +01001350 return DivisionTestHelper<float>(workloadFactory,
1351 shape, input0, 1.0f, 0,
1352 shape, input1, 1.0f, 0,
1353 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001354}
1355
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001356LayerTestResult<float,4> DivisionTest(armnn::IWorkloadFactory& workloadFactory)
1357{
1358 const unsigned int width = 2;
1359 const unsigned int height = 2;
1360 const unsigned int channelCount = 2;
1361 const unsigned int batchSize = 2;
1362
1363 unsigned int shape[] = { batchSize, channelCount, height, width };
1364
1365 std::vector<float> input0({
1366 2, 2, 2, 2, 3, 3, 3, 3,
1367 4, 4, 4, 4, 5, 5, 5, 5 });
1368
1369 std::vector<float> input1({
1370 1, 1, 1, 1, 2, 2, 2, 2,
1371 4, 4, 4, 4, 4, 4, 4, 4 });
1372
1373 std::vector<float> output({
1374 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
1375 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
1376
1378 return DivisionTestHelper<float>(workloadFactory,
1379 shape, input0, 1.0f, 0,
1380 shape, input1, 1.0f, 0,
1381 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001382}
1383
1384LayerTestResult<float, 4> DivisionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
1385{
1386 unsigned int shape0[] = { 1, 2, 2, 2 };
1387 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1388
1389 unsigned int shape1[] = { 1, 1, 1, 1 };
1390 std::vector<float> input1({ 2 });
1391
1392 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1393
1395 return DivisionTestHelper<float>(workloadFactory,
1396 shape0, input0, 1.0f, 0,
1397 shape1, input1, 1.0f, 0,
1398 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001399}
1400
1401LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
1402{
1403 unsigned int shape0[] = { 1, 3, 3, 2 };
1404 std::vector<float> input0({
1405 1, 4, 3, 8, 5, 12,
1406 7, 16, 9, 20, 11, 24,
1407 13, 28, 15, 32, 17, 36});
1408
1409 unsigned int shape1[] = { 1, 1, 1, 2 };
1410 std::vector<float> input1({ 1, 2 });
1411
1412 std::vector<float> output({
1413 1, 2, 3, 4, 5, 6,
1414 7, 8, 9, 10, 11, 12,
1415 13, 14, 15, 16, 17, 18});
1416
David Beck5cd01f32018-09-12 16:00:08 +01001417 return DivisionTestHelper<float>(workloadFactory,
1418 shape0, input0, 1.0f, 0,
1419 shape1, input1, 1.0f, 0,
1420 shape0, output, 1.0f, 0);
1421}
1422
1424LayerTestResult<uint8_t,4> DivisionUint8Test(armnn::IWorkloadFactory& workloadFactory)
1425{
1426 const unsigned int width = 2;
1427 const unsigned int height = 2;
1428 const unsigned int channelCount = 2;
1429 const unsigned int batchSize = 2;
1430
1431 unsigned int shape[] = { batchSize, channelCount, height, width };
1432
1433 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
1434 4, 4, 4, 4, 5, 5, 5, 5 });
1435
1436 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
1437 4, 4, 4, 4, 4, 4, 4, 4 });
1438
1439 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
1440 4, 4, 4, 4, 5, 5, 5, 5});
1441
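    // With input scales of 1.0, the real results are 2.0, 1.5, 1.0 and 1.25; the output
    // scale of 0.25 quantizes these to 8, 6, 4 and 5 (quantized = real / scale + offset).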
1443 return DivisionTestHelper<uint8_t>(workloadFactory,
1444 shape, input0, 1.0f, 0,
1445 shape, input1, 1.0f, 0,
1446 shape, output, 0.25f, 0);
1447}
1448
1449LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
1450{
1451 unsigned int shape0[] = { 1, 2, 2, 2 };
1452 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1453
1454 unsigned int shape1[] = { 1, 1, 1, 1 };
1455 std::vector<uint8_t> input1({ 2 });
1456
1457 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1458
1459 return DivisionTestHelper<uint8_t>(workloadFactory,
1460 shape0, input0, 1.0f, 0,
1461 shape1, input1, 1.0f, 0,
1462 shape0, output, 1.0f, 0);
1463}
1464
1465LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
1466{
1467 unsigned int shape0[] = { 1, 3, 3, 2 };
1468 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
1469 7, 16, 9, 20, 11, 24,
1470 13, 28, 15, 32, 17, 36});
1471
1472 unsigned int shape1[] = { 1, 1, 1, 2 };
1473 std::vector<uint8_t> input1({ 1, 2 });
1474
1475 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
1476 7, 8, 9, 10, 11, 12,
1477 13, 14, 15, 16, 17, 18});
1478
1479 return DivisionTestHelper<uint8_t>(workloadFactory,
1480 shape0, input0, 1.0f, 0,
1481 shape1, input1, 1.0f, 0,
1482 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001483}
1484
1485namespace {
surmeh01bceff2f2018-03-29 16:29:27 +01001486LayerTestResult<float,4> MultiplicationTestHelper(armnn::IWorkloadFactory& workloadFactory,
1487 const unsigned int shape0[4],
1488 const std::vector<float> & values0,
1489 const unsigned int shape1[4],
1490 const std::vector<float> & values1,
1491 const unsigned int outShape[4],
1492 const std::vector<float> & outValues)
telsoa014fcda012018-03-09 14:13:49 +00001493{
surmeh01bceff2f2018-03-29 16:29:27 +01001494 const size_t dimensionCount = 4;
1495 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
1496 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
1497 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
telsoa014fcda012018-03-09 14:13:49 +00001498
surmeh01bceff2f2018-03-29 16:29:27 +01001499 auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
1500 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00001501
1502 LayerTestResult<float,4> ret(outputTensorInfo);
1503
1504 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1505 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1506 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1507
1508 armnn::MultiplicationQueueDescriptor data;
1509 armnn::WorkloadInfo info;
1510 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1511 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1512 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1513
1514 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
1515
1516 inputHandle0->Allocate();
1517 inputHandle1->Allocate();
1518 outputHandle->Allocate();
1519
1520 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1521 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1522
1523 workload->Execute();
1524
1525 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1526
surmeh01bceff2f2018-03-29 16:29:27 +01001527 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00001528 return ret;
1529}
surmeh01bceff2f2018-03-29 16:29:27 +01001530} // anonymous namespace
1531
1533LayerTestResult<float,4> MultiplicationTest(armnn::IWorkloadFactory& workloadFactory)
1534{
1535 const unsigned int width = 2;
1536 const unsigned int height = 2;
1537 const unsigned int channelCount = 2;
1538 const unsigned int batchSize = 2;
1539
1540 unsigned int shape[] = { batchSize, channelCount, height, width };
1541
1542 std::vector<float> input0({
1543 1, 1, 1, 1, 2, 2, 2, 2,
1544 3, 3, 3, 3, 4, 4, 4, 4 });
1545
1546 std::vector<float> input1({
1547 2, 2, 2, 2, 3, 3, 3, 3,
1548 4, 4, 4, 4, 5, 5, 5, 5 });
1549
1550 std::vector<float> output({
1551 2, 2, 2, 2, 6, 6, 6, 6,
1552 12, 12, 12, 12, 20, 20, 20, 20 });
1553
1554 return MultiplicationTestHelper(workloadFactory,
1555 shape,
1556 input0,
1557 shape,
1558 input1,
1559 shape,
1560 output);
1561}
1562
1563LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
1564{
1565 unsigned int shape0[] = { 1, 2, 2, 2 };
1566 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
1567
1568 unsigned int shape1[] = { 1, 1, 1, 1 };
1569 std::vector<float> input1({ 2 });
1570
1571 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
1572
1573 return MultiplicationTestHelper(workloadFactory,
1574 shape0,
1575 input0,
1576 shape1,
1577 input1,
1578 shape0,
1579 output);
1580}
1581
1582LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
1583{
1584 unsigned int shape0[] = { 1, 3, 3, 2 };
1585 std::vector<float> input0({
1586 1, 2, 3, 4, 5, 6,
1587 7, 8, 9, 10, 11, 12,
1588 13, 14, 15, 16, 17, 18});
1589
1590 unsigned int shape1[] = { 1, 1, 1, 2 };
1591 std::vector<float> input1({ 1, 2 });
1592
1593 std::vector<float> output({
1594 1, 4, 3, 8, 5, 12,
1595 7, 16, 9, 20, 11, 24,
1596 13, 28, 15, 32, 17, 36});
1597
1598 return MultiplicationTestHelper(workloadFactory,
1599 shape0,
1600 input0,
1601 shape1,
1602 input1,
1603 shape0,
1604 output);
1605}
telsoa014fcda012018-03-09 14:13:49 +00001606
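// The Compare* tests below run the same workload on the factory under test and on a
// reference factory, returning both outputs so the caller can check that they match.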
1607LayerTestResult<float,4> CompareMultiplicationTest(armnn::IWorkloadFactory& workloadFactory,
1608 armnn::IWorkloadFactory& refWorkloadFactory)
1609{
1610 const unsigned int width = 16;
1611 const unsigned int height = 32;
1612 const unsigned int channelCount = 2;
1613 const unsigned int batchSize = 5;
1614
1615 armnn::TensorInfo inputTensorInfo0;
1616 armnn::TensorInfo inputTensorInfo1;
1617 armnn::TensorInfo outputTensorInfo;
1618
1619 constexpr unsigned int shape[] = { batchSize, channelCount, height, width };
1620
1621 inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1622 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1623 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1624
1625 LayerTestResult<float,4> comparisonResult(outputTensorInfo);
1626
1627 auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
1628 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);
1629
1630 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1631 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1632 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1633
1634 std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
1635 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
1636 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1637
1638 armnn::MultiplicationQueueDescriptor data;
1639 armnn::WorkloadInfo info;
1640 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1641 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1642 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1643
1644 armnn::MultiplicationQueueDescriptor refData = data;
1645 armnn::WorkloadInfo refInfo = info;
1646 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
1647 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
1648 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1649
1650 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
1651 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);
1652
1653 inputHandle0->Allocate();
1654 inputHandle1->Allocate();
1655 outputHandle->Allocate();
1656 inputHandle0Ref->Allocate();
1657 inputHandle1Ref->Allocate();
1658 outputHandleRef->Allocate();
1659
1660 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1661 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1662 CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
1663 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
1664
1665 workload->Execute();
1666 workloadRef->Execute();
1667
1668 CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
1669 CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
1670
1671 return comparisonResult;
1672}
1673
1674LayerTestResult<float,4> CompareBatchNormTest(armnn::IWorkloadFactory& workloadFactory,
1675 armnn::IWorkloadFactory& refWorkloadFactory)
1676{
1677 const unsigned int width = 2;
1678 const unsigned int height = 3;
1679 const unsigned int channels = 5;
1680 const unsigned int batchSize = 3;
1681
1682 armnn::TensorInfo inputTensorInfo;
1683 armnn::TensorInfo outputTensorInfo;
1684 armnn::TensorInfo tensorInfo;
1685
1686 constexpr unsigned int shape[] = {batchSize, channels, height, width};
1687 constexpr unsigned int tensorShape[] = {channels};
1688
1689 inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1690 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1691 tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);
1692
1693 auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);
1694
1695 auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
1696 auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
1697 auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
1698 auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);
1699
1700 LayerTestResult<float,4> ret(outputTensorInfo);
1701
1702 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
1703 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1704
1705 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
1706 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1707
1708 armnn::BatchNormalizationQueueDescriptor data;
1709 armnn::WorkloadInfo info;
1710 armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
1711 armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
1712 armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
1713 armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
1714
1715 AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
1716 AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
1717 AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
1718 AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
1719
1720 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1721 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1722 data.m_Mean = &meanTensor;
1723 data.m_Variance = &varianceTensor;
1724 data.m_Beta = &betaTensor;
1725 data.m_Gamma = &gammaTensor;
1726 data.m_Parameters.m_Eps = 0.01f;
1727
1728 armnn::BatchNormalizationQueueDescriptor refData = data;
1729 armnn::WorkloadInfo refInfo = info;
1730 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
1731 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1732
1733 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
1734 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);
1735
1736 inputHandle->Allocate();
1737 outputHandle->Allocate();
1738 inputHandleRef->Allocate();
1739 outputHandleRef->Allocate();
1740
1741 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
1742 CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
1743
1744 workload->Execute();
1745 workloadRef->Execute();
1746
1747 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1748 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
1749
1750 return ret;
1751}
1752
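// Helper that runs a Permute workload over inputData using the given mappings; the
// permuted data is returned in outputData, and inputTensorInfo is updated in place to
// describe the permuted tensor.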
surmeh013537c2c2018-05-18 16:31:43 +01001753template<typename T>
1754void PermuteTensorData(
1755 armnn::IWorkloadFactory& workloadFactory,
1756 const armnn::PermutationVector& mappings,
1757 armnn::TensorInfo & inputTensorInfo,
1758 const T * inputData,
1759 std::vector<T>& outputData)
telsoa014fcda012018-03-09 14:13:49 +00001760{
surmeh013537c2c2018-05-18 16:31:43 +01001761 BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
1762 if (inputData == nullptr)
1763 {
1764 // Nullptr is an error in the test. By returning without doing the concatenation
1765 // I expect the caller to fail the test. It still makes sense to report this as
1766 // an assert for Debug builds.
1767 return;
1768 }
telsoa014fcda012018-03-09 14:13:49 +00001769
surmeh013537c2c2018-05-18 16:31:43 +01001770 armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
1771
1772 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
1773 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1774
1775 armnn::PermuteQueueDescriptor queueDescriptor;
1776 queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
1777 armnn::WorkloadInfo workloadInfo;
1778 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
1779 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
1780
1781 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
1782
1783 inputHandle->Allocate();
1784 outputHandle->Allocate();
1785
1786 CopyDataToITensorHandle(inputHandle.get(), inputData);
1787
1788 workload->Execute();
1789
1790 outputData.resize(outputTensorInfo.GetNumElements());
1791 CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
1792 inputTensorInfo = outputTensorInfo;
1793}
1794
1795armnn::OriginsDescriptor CreateMergerDescriptorForConcatenation(
1796 const std::vector<armnn::TensorInfo> & inputTensorInfos,
1797 unsigned int concatDim)
1798{
telsoa014fcda012018-03-09 14:13:49 +00001799 std::vector<armnn::TensorShape> shapes;
1800 shapes.reserve(inputTensorInfos.size());
1801 for (const armnn::TensorInfo& it: inputTensorInfos)
1802 {
1803 shapes.push_back(it.GetShape());
1804 }
surmeh013537c2c2018-05-18 16:31:43 +01001805
1806 return armnn::CreateMergerDescriptorForConcatenation(shapes.begin(),
1807 shapes.end(),
1808 concatDim);
1809}
1810
1811//
1812// Concatenation is only supported for the N and C dimensions of NCHW tensors. For
telsoa01c577f2c2018-08-31 09:22:23 +01001813// tensors with fewer than 4 dimensions we need to make sure that the concat dimension
surmeh013537c2c2018-05-18 16:31:43 +01001814// is at least the 3rd slowest iterating one.
1815//
1816
1817bool NeedPermuteForConcat(
1818 const std::vector<armnn::TensorInfo> & inputTensorInfos,
1819 unsigned int concatDim)
1820{
1821 // See note above. Additionally we expect the input shapes to have the
1822 // same number of dimensions.
1823 unsigned int nDimensions = 0;
1824
telsoa01c577f2c2018-08-31 09:22:23 +01001825 // Determine the number of dimensions, and sanity-check them
1826 // against test implementation issues.
surmeh013537c2c2018-05-18 16:31:43 +01001827 for (auto && tensorInfo : inputTensorInfos)
1828 {
1829 if (!nDimensions)
1830 {
1831 nDimensions = tensorInfo.GetShape().GetNumDimensions();
1832 }
1833 else
1834 {
1835 BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
1836 "Input shapes must have the same number of dimensions");
1837 }
1838 }
1839
1840 return (nDimensions-concatDim) < 3;
1841}
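// For example, with NCHW (4 dimensions): concatenating along N (0) or C (1) gives
// (4 - 0) = 4 and (4 - 1) = 3, so no permute is needed, while H (2) or W (3) gives
// 2 or 1, which is < 3 and so takes the permute/concat/permute-back path below.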
1842
1843armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
1844{
1845 unsigned int numDims = inputShape.GetNumDimensions();
1846 if (numDims >= 3)
1847 {
1848 // Nothing to do if the inputShape has at least 3 dimensions.
1849 return inputShape;
1850 }
1851
1852 std::vector<unsigned int> newDims(size_t(3), 1u);
1853 unsigned int expandedBy = 3 - numDims;
1854 for (unsigned int i=0; i<numDims; ++i)
1855 {
1856 newDims[expandedBy+i] = inputShape[i];
1857 }
1858 return armnn::TensorShape(3u, &newDims[0]);
1859}
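// E.g. a 1D shape { 5 } becomes { 1, 1, 5 } and a 2D shape { 4, 6 } becomes { 1, 4, 6 };
// the original dimensions stay as the fastest iterating ones.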
1860
1861void Generate3dPermuteVectorForConcat(
1862 unsigned int numDimensions,
1863 unsigned int & concatDim,
1864 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
1865{
1866 BOOST_ASSERT_MSG(numDimensions <= 3,
1867 "Only dimensions 1,2 and 3 are supported by this helper");
1868
1869 unsigned int expandedBy = 3 - numDimensions;
1870 unsigned int expandedConcatAxis = concatDim + expandedBy;
1871
1872 if (expandedConcatAxis == 2)
1873 {
1874 concatDim = 0;
1875 armnn::PermutationVector forwardPermutation({1, 2, 0});
1876 armnn::PermutationVector reversePermutation({2, 0, 1});
1877 permutations = std::make_pair(forwardPermutation, reversePermutation);
1878 }
1879 else if (expandedConcatAxis == 1)
1880 {
1881 concatDim = 0;
1882 armnn::PermutationVector forwardPermutation({2, 0, 1});
1883 armnn::PermutationVector reversePermutation({1, 2, 0});
1884 permutations = std::make_pair(forwardPermutation, reversePermutation);
1885 }
1886 else
1887 {
1888 BOOST_ASSERT(expandedConcatAxis == 0);
1889 concatDim = 0;
1890 }
1891}
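// Worked example: a 2D concat along dimension 1 gives expandedConcatAxis = 1 + (3 - 2) = 2,
// so the forward permutation {1, 2, 0} rotates the concat axis into position 0, the concat
// happens along dimension 0, and the reverse permutation {2, 0, 1} restores the layout.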
1892
1893//
1894// Permute the input tensors so we can do a supported concatenation.
1895// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
1896// at the front. Finally this function tells what the output shape
1897// of the permuted concatenated tensor is going to be.
1898//
1899template <typename T>
1900void PermuteInputsForConcat(
1901 armnn::IWorkloadFactory& workloadFactory,
1902 std::vector<armnn::TensorInfo> & inputTensorInfos,
1903 std::vector<T *> & inputData,
1904 std::vector<std::vector<T>> & inputDataStorage,
1905 armnn::PermutationVector & permuteVector,
1906 unsigned int & concatDim,
1907 armnn::TensorInfo & outputTensorInfo)
1908{
1909 BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
1910 "Expecting more than one tensor to be concatenated here");
1911
1912 unsigned int numDims = 0;
1913 unsigned int nthInput = 0;
1914 const armnn::PermutationVector identity({0, 1, 2});
1915
1916 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
1917 std::make_pair(identity, identity);
1918
1919 inputDataStorage.resize(inputData.size());
1920
1921 for (auto && tensorInfo : inputTensorInfos)
1922 {
1923 if (numDims == 0)
1924 {
1925 numDims = tensorInfo.GetShape().GetNumDimensions();
1926 Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);
telsoa01c577f2c2018-08-31 09:22:23 +01001927 // Store the reverse permutation.
surmeh013537c2c2018-05-18 16:31:43 +01001928 permuteVector = permutations.second;
1929 BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
1930 "Test logic error, we don't need permutation, so we shouldn't arrive here");
1931 }
1932 else
1933 {
1934 BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
1935 "All inputs must have the same number of dimensions");
1936 }
1937
1938 armnn::TensorInfo newTensorInfo = tensorInfo;
1939 newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));
1940
1941 PermuteTensorData<T>(workloadFactory,
1942 permutations.first,
1943 newTensorInfo,
1944 inputData[nthInput],
1945 inputDataStorage[nthInput]);
1946
1947 inputData[nthInput] = inputDataStorage[nthInput].data();
1948 inputTensorInfos[nthInput] = newTensorInfo;
1949
1950 ++nthInput;
1951 }
1952
1953 outputTensorInfo.SetShape(
1954 armnnUtils::Permuted(
1955 ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
1956 permutations.first));
1957}
1958
1959
1960//
1961 // This is the counterpart of PermuteInputsForConcat(...): it permutes back
telsoa01c577f2c2018-08-31 09:22:23 +01001962// the output of the concatenation so we can check it against an expected
surmeh013537c2c2018-05-18 16:31:43 +01001963// output.
1964//
1965template <typename T>
1966void PermuteOutputForConcat(
1967 armnn::IWorkloadFactory& workloadFactory,
1968 const armnn::TensorInfo & tensorInfo,
1969 const armnn::PermutationVector & permuteVector,
1970 std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
1971 T * data)
1972{
1973 BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
1974 if (data == nullptr)
1975 {
1976 // Nullptr is an error in the test. By returning without doing the permutation
1977 // I expect the caller to fail the test. It still makes sense to report this as
1978 // an assert for Debug builds.
1979 return;
1980 }
1981
1982 armnn::TensorInfo resultTensorInfo = tensorInfo;
1983 std::vector<T> inputData(tensorInfo.GetNumElements());
1984 std::vector<T> outputData;
1985
1986 CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
1987
1988 PermuteTensorData<T>(workloadFactory,
1989 permuteVector,
1990 resultTensorInfo,
1991 &inputData[0],
1992 outputData);
1993
1994 ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
1995}
1996
1997template <typename T>
1998void Concatenate(armnn::IWorkloadFactory& workloadFactory,
1999 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
2000 std::initializer_list<T *> inputsOrig,
2001 const armnn::TensorInfo& outputTensorInfoOrig,
2002 T * output,
2003 unsigned int concatDim)
2004{
2005 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
2006 if (output == nullptr)
2007 {
2008 // Nullptr is an error in the test. By returning without doing the concatenation
2009 // I expect the caller to fail the test. It still makes sense to report this as
2010 // an assert for Debug builds.
2011 return;
2012 }
2013
2014 armnn::MergerQueueDescriptor queueDescriptor;
2015
telsoa01c577f2c2018-08-31 09:22:23 +01002016 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01002017 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
2018 std::vector<T *> inputs = inputsOrig;
2019 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
2020
2021 armnn::PermutationVector permuteVector{0, 1, 2};
2022
telsoa01c577f2c2018-08-31 09:22:23 +01002023 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01002024 std::vector<std::vector<T>> tmpInputDataStorage;
2025
2026 const size_t inputCount = inputTensorInfos.size();
2027
2028 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
2029
2030 if (needPermuteForConcat)
2031 {
2032 //
2033 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01002034 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01002035 //
2036 PermuteInputsForConcat<T>(workloadFactory,
2037 inputTensorInfos,
2038 inputs,
2039 tmpInputDataStorage,
2040 permuteVector,
2041 concatDim,
2042 outputTensorInfo);
2043 }
2044
2045 armnn::OriginsDescriptor viewsDescriptor = CreateMergerDescriptorForConcatenation(inputTensorInfos, concatDim);
telsoa014fcda012018-03-09 14:13:49 +00002046
2047 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
2048 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
2049 {
2050 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
2051 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
2052 }
2053
telsoa014fcda012018-03-09 14:13:49 +00002054 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2055
2056 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
2057 inputHandles.reserve(inputCount);
2058
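    // When the backend supports sub-tensors, each input handle is created as a view
    // directly into the output tensor at its view origin, so the merger writes in place;
    // otherwise standalone input tensors are created and the workload copies them.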
2059 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2060 for (unsigned int i = 0; i < inputCount; ++i)
2061 {
surmeh013537c2c2018-05-18 16:31:43 +01002062 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
telsoa014fcda012018-03-09 14:13:49 +00002063
2064 std::unique_ptr<armnn::ITensorHandle> inputHandle = subTensorsSupported ?
2065 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo.GetShape(),
2066 queueDescriptor.m_ViewOrigins[i].m_Origin.data())
2067 : workloadFactory.CreateTensorHandle(inputTensorInfo);
2068
2069 inputHandles.emplace_back(std::move(inputHandle));
2070 }
2071
2072 armnn::WorkloadInfo workloadInfo;
2073
2074 for (unsigned int i = 0; i < inputCount; ++i)
2075 {
surmeh013537c2c2018-05-18 16:31:43 +01002076 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00002077 }
2078
2079 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
2080
2081 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(queueDescriptor, workloadInfo);
2082
2083 for (auto& inputHandle : inputHandles)
2084 {
2085 inputHandle->Allocate();
2086 }
2087
2088 outputHandle->Allocate();
2089
2090 unsigned int nextInputId = 0;
2091 for (auto& inputHandle : inputHandles)
2092 {
surmeh013537c2c2018-05-18 16:31:43 +01002093 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
2094 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00002095 }
2096
2097 workload->Execute();
2098
surmeh013537c2c2018-05-18 16:31:43 +01002099 if (needPermuteForConcat)
2100 {
2101 PermuteOutputForConcat<T>(workloadFactory,
2102 outputTensorInfo,
2103 permuteVector,
2104 std::move(outputHandle),
2105 output);
2106 }
2107 else
2108 {
2109 CopyDataFromITensorHandle(output, outputHandle.get());
2110 }
telsoa014fcda012018-03-09 14:13:49 +00002111}
2112
2113template <typename T>
2114LayerTestResult<T, 1> Concatenation1dTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
2115{
2116 armnn::TensorInfo inputTensorInfo({ 3 }, armnn::GetDataType<T>());
2117
2118 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
2119 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
2120 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
2121
2122 armnn::TensorInfo outputTensorInfo({ 9 }, armnn::GetDataType<T>());
2123
2124 LayerTestResult<T, 1> result(outputTensorInfo);
2125
2126 std::vector<T> output;
2127 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002128 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002129 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2130 { input0.data(), input1.data(), input2.data() },
2131 outputTensorInfo,
2132 output.data(),
2133 0);
2134
2135 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
2136 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2137 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
2138 }));
2139
2140 return result;
2141}
2142
2143LayerTestResult<float, 1> Concatenation1dTest(armnn::IWorkloadFactory& workloadFactory)
2144{
2145 return Concatenation1dTestImpl<float>(workloadFactory, 0.0f, 0);
2146}
2147
2148template <typename T>
2149LayerTestResult<T, 2> Concatenation2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
2150 const armnn::TensorInfo& outputTensorInfo,
2151 unsigned int dimension,
2152 const float qScale,
2153 const int32_t qOffset)
2154{
2155 armnn::TensorInfo inputTensorInfo({ 2, 3 }, armnn::GetDataType<T>());
2156
2157 auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2158 // Batch 0
2159 1.0f, 2.0f, 3.0f,
2160
2161 // Batch 1
2162 10.0f, 11.0f, 12.0f,
2163 }));
2164
2165 auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2166 // Batch 0
2167 4.0f, 5.0f, 6.0f,
2168
2169 // Batch 1
2170 13.0f, 14.0f, 15.0f,
2171 }));
2172
2173 auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2174 // Batch 0
2175 7.0f, 8.0f, 9.0f,
2176
2177 // Batch 1
2178 16.0f, 17.0f, 18.0f,
2179 }));
2180
2181 LayerTestResult<T, 2> result(outputTensorInfo);
2182
2183 std::vector<T> output;
2184 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002185 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002186 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2187 { input0.data(), input1.data(), input2.data() },
2188 outputTensorInfo,
2189 output.data(),
2190 dimension);
2191
2192 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
2193 return result;
2194}
2195
2196template <typename T>
2197LayerTestResult<T, 2> Concatenation2dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory,
2198 float qScale, int32_t qOffset)
2199{
2200 armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
2201
2202 LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 0, qScale, qOffset);
2203 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2204 // Batch 0
2205 1.0f, 2.0f, 3.0f,
2206
2207 // Batch 1
2208 10.0f, 11.0f, 12.0f,
2209
2210 // Batch 2
2211 4.0f, 5.0f, 6.0f,
2212
2213 // Batch 3
2214 13.0f, 14.0f, 15.0f,
2215
2216 // Batch 4
2217 7.0f, 8.0f, 9.0f,
2218
2219 // Batch 5
2220 16.0f, 17.0f, 18.0f,
2221 }));
2222
2223 return result;
2224}
2225
2226LayerTestResult<float, 2> Concatenation2dDim0Test(armnn::IWorkloadFactory& workloadFactory)
2227{
2228 return Concatenation2dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
2229}
2230
2231template <typename T>
2232LayerTestResult<T, 2> Concatenation2dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
2233 float qScale, int32_t qOffset)
2234{
2235 armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
2236
2237 LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
2238 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2239 // Batch 0
2240 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
2241
2242 // Batch 1
2243 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
2244 }));
2245
2246 return result;
2247}
2248
2249LayerTestResult<float, 2> Concatenation2dDim1Test(armnn::IWorkloadFactory& workloadFactory)
2250{
2251 return Concatenation2dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
2252}
2253
2254template <typename T>
2255LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2256 int32_t qOffset)
2257{
2258 armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
2259 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2260 // Batch 0
2261 1.0f, 2.0f, 3.0f,
2262
2263 // Batch 1
2264 10.0f, 11.0f, 12.0f,
2265 }));
2266
2267 armnn::TensorInfo input1TensorInfo({ 3, 3 }, armnn::GetDataType<T>());
2268 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2269 // Batch 0
2270 4.0f, 5.0f, 6.0f,
2271
2272 // Batch 1
2273 13.0f, 14.0f, 15.0f,
2274
2275 // Batch 0
2276 7.0f, 8.0f, 9.0f,
2277 }));
2278
2279 armnn::TensorInfo input2TensorInfo({ 1, 3 }, armnn::GetDataType<T>());
2280 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2281 // Batch 1
2282 16.0f, 17.0f, 18.0f,
2283 }));
2284
2285 armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
2286 LayerTestResult<T, 2> result(outputTensorInfo);
2287
2288 std::vector<T> output;
2289 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002290 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002291 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2292 { input0.data(), input1.data(), input2.data() },
2293 outputTensorInfo,
2294 output.data(),
2295 0);
2296
2297 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
2298 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2299 // Batch 0
2300 1.0f, 2.0f, 3.0f,
2301
2302 // Batch 1
2303 10.0f, 11.0f, 12.0f,
2304
2305 // Batch 2
2306 4.0f, 5.0f, 6.0f,
2307
2308 // Batch 3
2309 13.0f, 14.0f, 15.0f,
2310
2311 // Batch 4
2312 7.0f, 8.0f, 9.0f,
2313
2314 // Batch 5
2315 16.0f, 17.0f, 18.0f,
2316 }));
2317
2318 return result;
2319}
2320
2321LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2322{
2323 return Concatenation2dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2324}
2325
2326template <typename T>
2327LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2328 int32_t qOffset)
2329{
2330 armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
2331 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2332 // Batch 0
2333 1.0f, 2.0f, 3.0f,
2334
2335 // Batch 1
2336 10.0f, 11.0f, 12.0f,
2337 }));
2338
2339 armnn::TensorInfo input1TensorInfo({ 2, 5 }, armnn::GetDataType<T>());
2340 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2341 // Batch 0
2342 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
2343
2344 // Batch 1
2345 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
2346 }));
2347
2348 armnn::TensorInfo input2TensorInfo({ 2, 1 }, armnn::GetDataType<T>());
2349 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2350 // Batch 0
2351 9.0f,
2352
2353 // Batch 1
2354 18.0f
2355 }));
2356
2357 armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
2358 LayerTestResult<T, 2> result(outputTensorInfo);
2359
2360 std::vector<T> output;
2361 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002362 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002363 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2364 { input0.data(), input1.data(), input2.data() },
2365 outputTensorInfo,
2366 output.data(),
2367 1);
2368
2369 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
2370 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2371 // Batch 0
2372 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
2373
2374 // Batch 1
2375 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
2376 }));
2377
2378 return result;
2379}
2380
2381LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2382{
2383 return Concatenation2dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2384}
2385
2386template <typename T>
2387LayerTestResult<T, 3> Concatenation3dTestImpl(armnn::IWorkloadFactory& workloadFactory,
2388 const armnn::TensorInfo& outputTensorInfo,
2389 unsigned int dimension,
2390 float qScale,
2391 int32_t qOffset)
2392{
2393 armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2394
2395 auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2396 // Batch 0, Channel 0
2397 1.0f, 2.0f,
2398
2399 // Batch 0, Channel 1
2400 3.0f, 4.0f,
2401
2402 // Batch 0, Channel 2
2403 5.0f, 6.0f,
2404
2405 // Batch 1, Channel 0
2406 19.0f, 20.0f,
2407
2408 // Batch 1, Channel 1
2409 21.0f, 22.0f,
2410
2411 // Batch 1, Channel 2
2412 23.0f, 24.0f
2413 }));
2414
2415 auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2416 // Batch 0, Channel 0
2417 7.0f, 8.0f,
2418
2419 // Batch 0, Channel 1
2420 9.0f, 10.0f,
2421
2422 // Batch 0, Channel 2
2423 11.0f, 12.0f,
2424
2425 // Batch 1, Channel 0
2426 25.0f, 26.0f,
2427
2428 // Batch 1, Channel 1
2429 27.0f, 28.0f,
2430
2431 // Batch 1, Channel 2
2432 29.0f, 30.0f
2433 }));
2434
2435 auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2436 // Batch 0, Channel 0
2437 13.0f, 14.0f,
2438
2439 // Batch 0, Channel 1
2440 15.0f, 16.0f,
2441
2442 // Batch 0, Channel 2
2443 17.0f, 18.0f,
2444
2445 // Batch 1, Channel 0
2446 31.0f, 32.0f,
2447
2448 // Batch 1, Channel 1
2449 33.0f, 34.0f,
2450
2451 // Batch 1, Channel 2
2452 35.0f, 36.0f
2453 }));
2454
2455 LayerTestResult<T, 3> result(outputTensorInfo);
2456
2457 std::vector<T> output;
2458 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002459 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002460 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2461 { input0.data(), input1.data(), input2.data() },
2462 outputTensorInfo,
2463 output.data(),
2464 dimension);
2465
2466 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
2467 return result;
2468}
2469
2470template <typename T>
2471LayerTestResult<T, 3> Concatenation3dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2472 int32_t qOffset)
2473{
2474 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
2475
2476 LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 0,
2477 qScale, qOffset);
2478 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2479 // Batch 0, Channel 0
2480 1.0f, 2.0f,
2481
2482 // Batch 0, Channel 1
2483 3.0f, 4.0f,
2484
2485 // Batch 0, Channel 2
2486 5.0f, 6.0f,
2487
2488 // Batch 1, Channel 0
2489 19.0f, 20.0f,
2490
2491 // Batch 1, Channel 1
2492 21.0f, 22.0f,
2493
2494 // Batch 1, Channel 2
2495 23.0f, 24.0f,
2496
2497 // Batch 2, Channel 0
2498 7.0f, 8.0f,
2499
2500 // Batch 2, Channel 1
2501 9.0f, 10.0f,
2502
2503 // Batch 2, Channel 2
2504 11.0f, 12.0f,
2505
2506 // Batch 3, Channel 0
2507 25.0f, 26.0f,
2508
2509 // Batch 3, Channel 1
2510 27.0f, 28.0f,
2511
2512 // Batch 3, Channel 2
2513 29.0f, 30.0f,
2514
2515 // Batch 4, Channel 0
2516 13.0f, 14.0f,
2517
2518 // Batch 4, Channel 1
2519 15.0f, 16.0f,
2520
2521 // Batch 4, Channel 2
2522 17.0f, 18.0f,
2523
2524 // Batch 5, Channel 0
2525 31.0f, 32.0f,
2526
2527 // Batch 5, Channel 1
2528 33.0f, 34.0f,
2529
2530 // Batch 5, Channel 2
2531 35.0f, 36.0f
2532 }));
2533 return result;
2534}
2535
2536LayerTestResult<float, 3> Concatenation3dDim0Test(armnn::IWorkloadFactory& workloadFactory)
2537{
2538 return Concatenation3dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
2539}
2540
2541template <typename T>
2542LayerTestResult<T, 3> Concatenation3dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
2543 float qScale, int32_t qOffset)
2544{
2545 armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, armnn::GetDataType<T>());
2546
2547 LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
2548 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2549 // Batch 0, Channel 0
2550 1.0f, 2.0f,
2551
2552 // Batch 0, Channel 1
2553 3.0f, 4.0f,
2554
2555 // Batch 0, Channel 2
2556 5.0f, 6.0f,
2557
2558 // Batch 0, Channel 3
2559 7.0f, 8.0f,
2560
2561 // Batch 0, Channel 4
2562 9.0f, 10.0f,
2563
2564 // Batch 0, Channel 5
2565 11.0f, 12.0f,
2566
2567 // Batch 0, Channel 6
2568 13.0f, 14.0f,
2569
2570 // Batch 0, Channel 7
2571 15.0f, 16.0f,
2572
2573 // Batch 0, Channel 8
2574 17.0f, 18.0f,
2575
2576 // Batch 1, Channel 0
2577 19.0f, 20.0f,
2578
2579 // Batch 1, Channel 1
2580 21.0f, 22.0f,
2581
2582 // Batch 1, Channel 2
2583 23.0f, 24.0f,
2584
2585 // Batch 1, Channel 3
2586 25.0f, 26.0f,
2587
2588 // Batch 1, Channel 4
2589 27.0f, 28.0f,
2590
2591 // Batch 1, Channel 5
2592 29.0f, 30.0f,
2593
2594 // Batch 1, Channel 6
2595 31.0f, 32.0f,
2596
2597 // Batch 1, Channel 7
2598 33.0f, 34.0f,
2599
2600 // Batch 1, Channel 8
2601 35.0f, 36.0f
2602 }));
2603
2604 return result;
2605}
2606
2607LayerTestResult<float, 3> Concatenation3dDim1Test(armnn::IWorkloadFactory& workloadFactory)
2608{
2609 return Concatenation3dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
2610}
2611
2612template <typename T>
2613LayerTestResult<T, 3> Concatenation3dDim2TestImpl(armnn::IWorkloadFactory& workloadFactory,
2614 float qScale, int32_t qOffset)
2615{
2616 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
2617
2618 LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 2, qScale, qOffset);
2619 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2620 // Batch 0, Channel 0
2621 1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
2622
2623 // Batch 0, Channel 1
2624 3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
2625
2626 // Batch 0, Channel 2
2627 5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
2628
2629 // Batch 1, Channel 0
2630 19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
2631
2632 // Batch 1, Channel 1
2633 21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
2634
2635 // Batch 1, Channel 2
2636 23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
2637 }));
2638
2639 return result;
2640}
2641
2642LayerTestResult<float, 3> Concatenation3dDim2Test(armnn::IWorkloadFactory& workloadFactory)
2643{
2644 return Concatenation3dDim2TestImpl<float>(workloadFactory, 0.0f, 0);
2645}
2646
2647template <typename T>
2648LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2649 int32_t qOffset)
2650{
2651 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2652 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2653 // Batch 0, Channel 0
2654 1.0f, 2.0f,
2655
2656 // Batch 0, Channel 1
2657 3.0f, 4.0f,
2658
2659 // Batch 0, Channel 2
2660 5.0f, 6.0f,
2661
2662 // Batch 1, Channel 0
2663 19.0f, 20.0f,
2664
2665 // Batch 1, Channel 1
2666 21.0f, 22.0f,
2667
2668 // Batch 1, Channel 2
2669 23.0f, 24.0f
2670 }));
2671
2672 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, armnn::GetDataType<T>());
2673 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2674 // Batch 0, Channel 0
2675 7.0f, 8.0f,
2676
2677 // Batch 0, Channel 1
2678 9.0f, 10.0f,
2679
2680 // Batch 0, Channel 2
2681 11.0f, 12.0f,
2682 }));
2683
2684 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, armnn::GetDataType<T>());
2685 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2686 // Batch 0, Channel 0
2687 25.0f, 26.0f,
2688
2689 // Batch 0, Channel 1
2690 27.0f, 28.0f,
2691
2692 // Batch 0, Channel 2
2693 29.0f, 30.0f,
2694
2695 // Batch 1, Channel 0
2696 13.0f, 14.0f,
2697
2698 // Batch 1, Channel 1
2699 15.0f, 16.0f,
2700
2701 // Batch 1, Channel 2
2702 17.0f, 18.0f,
2703
2704 // Batch 2, Channel 0
2705 31.0f, 32.0f,
2706
2707 // Batch 2, Channel 1
2708 33.0f, 34.0f,
2709
2710 // Batch 2, Channel 2
2711 35.0f, 36.0f
2712 }));
2713
2714 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
2715 LayerTestResult<T, 3> result(outputTensorInfo);
2716
2717 std::vector<T> output;
2718 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002719 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002720 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2721 { input0.data(), input1.data(), input2.data() },
2722 outputTensorInfo,
2723 output.data(),
2724 0);
2725
2726 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
2727 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2728 // Batch 0, Channel 0
2729 1.0f, 2.0f,
2730
2731 // Batch 0, Channel 1
2732 3.0f, 4.0f,
2733
2734 // Batch 0, Channel 2
2735 5.0f, 6.0f,
2736
2737 // Batch 1, Channel 0
2738 19.0f, 20.0f,
2739
2740 // Batch 1, Channel 1
2741 21.0f, 22.0f,
2742
2743 // Batch 1, Channel 2
2744 23.0f, 24.0f,
2745
2746 // Batch 2, Channel 0
2747 7.0f, 8.0f,
2748
2749 // Batch 2, Channel 1
2750 9.0f, 10.0f,
2751
2752 // Batch 2, Channel 2
2753 11.0f, 12.0f,
2754
2755 // Batch 3, Channel 0
2756 25.0f, 26.0f,
2757
2758 // Batch 3, Channel 1
2759 27.0f, 28.0f,
2760
2761 // Batch 3, Channel 2
2762 29.0f, 30.0f,
2763
2764 // Batch 4, Channel 0
2765 13.0f, 14.0f,
2766
2767 // Batch 4, Channel 1
2768 15.0f, 16.0f,
2769
2770 // Batch 4, Channel 2
2771 17.0f, 18.0f,
2772
2773 // Batch 5, Channel 0
2774 31.0f, 32.0f,
2775
2776 // Batch 5, Channel 1
2777 33.0f, 34.0f,
2778
2779 // Batch 5, Channel 2
2780 35.0f, 36.0f
2781 }));
2782
2783 return result;
2784}
2785
2786LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2787{
2788 return Concatenation3dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2789}
2790
2791template <typename T>
2792LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2793 int32_t qOffset)
2794{
2795 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2796 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2797 // Batch 0, Channel 0
2798 1.0f, 2.0f,
2799
2800 // Batch 0, Channel 1
2801 3.0f, 4.0f,
2802
2803 // Batch 0, Channel 2
2804 5.0f, 6.0f,
2805
2806 // Batch 1, Channel 0
2807 19.0f, 20.0f,
2808
2809 // Batch 1, Channel 1
2810 21.0f, 22.0f,
2811
2812 // Batch 1, Channel 2
2813 23.0f, 24.0f
2814 }));
2815
2816 armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, armnn::GetDataType<T>());
2817 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2818 // Batch 0, Channel 0
2819 7.0f, 8.0f,
2820
2821 // Batch 0, Channel 1
2822 9.0f, 10.0f,
2823
2824 // Batch 0, Channel 2
2825 11.0f, 12.0f,
2826
2827 // Batch 0, Channel 3
2828 25.0f, 26.0f,
2829
2830 // Batch 1, Channel 0
2831 27.0f, 28.0f,
2832
2833 // Batch 1, Channel 1
2834 29.0f, 30.0f,
2835
2836 // Batch 1, Channel 2
2837 13.0f, 14.0f,
2838
2839 // Batch 1, Channel 3
2840 15.0f, 16.0f,
2841 }));
2842
2843 armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, armnn::GetDataType<T>());
2844 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2845 // Batch 0, Channel 0
2846 17.0f, 18.0f,
2847
2848 // Batch 1, Channel 0
2849 31.0f, 32.0f,
2850 }));
2851
2852 armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, armnn::GetDataType<T>());
2853 LayerTestResult<T, 3> result(outputTensorInfo);
2854
2855 std::vector<T> output;
2856 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002857 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002858 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2859 { input0.data(), input1.data(), input2.data() },
2860 outputTensorInfo,
2861 output.data(),
2862 1);
2863
2864 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
2865 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2866 // Batch 0, Channel 0
2867 1.0f, 2.0f,
2868
2869 // Batch 0, Channel 1
2870 3.0f, 4.0f,
2871
2872 // Batch 0, Channel 2
2873 5.0f, 6.0f,
2874
2875 // Batch 0, Channel 3
2876 7.0f, 8.0f,
2877
2878 // Batch 0, Channel 4
2879 9.0f, 10.0f,
2880
2881 // Batch 0, Channel 5
2882 11.0f, 12.0f,
2883
2884 // Batch 0, Channel 6
2885 25.0f, 26.0f,
2886
2887 // Batch 0, Channel 7
2888 17.0f, 18.0f,
2889
2890 // Batch 1, Channel 0
2891 19.0f, 20.0f,
2892
2893 // Batch 1, Channel 1
2894 21.0f, 22.0f,
2895
2896 // Batch 1, Channel 2
2897 23.0f, 24.0f,
2898
2899 // Batch 1, Channel 3
2900 27.0f, 28.0f,
2901
2902 // Batch 1, Channel 4
2903 29.0f, 30.0f,
2904
2905 // Batch 1, Channel 5
2906 13.0f, 14.0f,
2907
2908 // Batch 1, Channel 6
2909 15.0f, 16.0f,
2910
2911 // Batch 1, Channel 7
2912 31.0f, 32.0f,
2913 }));
2914
2915 return result;
2916}
2917
2918LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2919{
2920 return Concatenation3dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2921}
2922
2923template <typename T>
2924LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2925 int32_t qOffset)
2926{
2927 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2928 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2929 // Batch 0, Channel 0
2930 1.0f, 2.0f,
2931
2932 // Batch 0, Channel 1
2933 3.0f, 4.0f,
2934
2935 // Batch 0, Channel 2
2936 5.0f, 6.0f,
2937
2938 // Batch 1, Channel 0
2939 19.0f, 20.0f,
2940
2941 // Batch 1, Channel 1
2942 21.0f, 22.0f,
2943
2944 // Batch 1, Channel 2
2945 23.0f, 24.0f
2946 }));
2947
2948 armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, armnn::GetDataType<T>());
2949 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2950 // Batch 0, Channel 0
2951 7.0f,
2952
2953 // Batch 0, Channel 1
2954 9.0f,
2955
2956 // Batch 0, Channel 2
2957 11.0f,
2958
2959 // Batch 1, Channel 0
2960 25.0f,
2961
2962 // Batch 1, Channel 1
2963 27.0f,
2964
2965 // Batch 1, Channel 2
2966 29.0f
2967 }));
2968
2969 armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, armnn::GetDataType<T>());
2970 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2971 // Batch 0, Channel 0
2972 13.0f, 14.0f, 50.0f,
2973
2974 // Batch 0, Channel 1
2975 15.0f, 16.0f, 51.0f,
2976
2977 // Batch 0, Channel 2
2978 17.0f, 18.0f, 52.0f,
2979
2980 // Batch 1, Channel 0
2981 31.0f, 32.0f, 53.0f,
2982
2983 // Batch 1, Channel 1
2984 33.0f, 34.0f, 54.0f,
2985
2986 // Batch 1, Channel 2
2987 35.0f, 36.0f, 55.0f,
2988 }));
2989
2990 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
2991 LayerTestResult<T, 3> result(outputTensorInfo);
2992
2993 std::vector<T> output;
2994 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002995 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002996 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2997 { input0.data(), input1.data(), input2.data() },
2998 outputTensorInfo,
2999 output.data(),
3000 2);
3001
3002 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
3003 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3004 // Batch 0, Channel 0
3005 1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
3006
3007 // Batch 0, Channel 1
3008 3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
3009
3010 // Batch 0, Channel 2
3011 5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
3012
3013 // Batch 1, Channel 0
3014 19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
3015
3016 // Batch 1, Channel 1
3017 21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
3018
3019 // Batch 1, Channel 2
3020 23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
3021 }));
3022
3023 return result;
3024}
3025
3026LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
3027{
3028 return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
3029}
3030
James Conroy6b965822018-11-01 11:33:09 +00003031LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory,
3032 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00003033{
James Conroy6b965822018-11-01 11:33:09 +00003034 const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
3035 const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00003036
James Conroy6b965822018-11-01 11:33:09 +00003037 std::vector<float> inputData({
3038 1.0f, 2.0f, 3.0f, 4.0f,
3039 2.0f, 3.0f, 4.0f, 5.0f,
3040 3.0f, 4.0f, 5.0f, 6.0f,
3041 4.0f, 5.0f, 6.0f, 7.0f,
3042
telsoa014fcda012018-03-09 14:13:49 +00003043 1.0f, 2.0f, 3.0f, 4.0f,
3044 2.0f, 3.0f, 4.0f, 5.0f,
3045 3.0f, 4.0f, 5.0f, 6.0f,
3046 4.0f, 5.0f, 6.0f, 7.0f
James Conroy6b965822018-11-01 11:33:09 +00003047 });
3048
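    // armnn::PermutationVector maps each source dimension to its destination, so
    // { 0, 3, 1, 2 } sends C (source dim 1) to dim 3, i.e. NCHW -> NHWC.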
3049 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
3050 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
3051 {
3052 std::vector<float> tmp(inputData.size());
3053 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
3054 inputData = tmp;
3055 }
3056
3057 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00003058
3059 LayerTestResult<float, 4> result(outputTensorInfo);
3060 result.outputExpected = input;
3061
3062 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3063 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3064
3065 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003066 descriptor.m_Parameters.m_DataLayout = dataLayout;
3067 armnn::WorkloadInfo info;
3068 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3069 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3070
3071 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3072
3073 inputHandle->Allocate();
3074 outputHandle->Allocate();
3075 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3076
James Conroy074f3712018-10-03 09:32:03 +01003077 workload->Execute();
3078
3079 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3080 return result;
3081}
3082
James Conroy6b965822018-11-01 11:33:09 +00003083LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory,
3084 const armnn::DataLayoutIndexed& dataLayout)
James Conroy074f3712018-10-03 09:32:03 +01003085{
James Conroy6b965822018-11-01 11:33:09 +00003086 const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
3087 const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 1, 1, dataLayout);
James Conroy074f3712018-10-03 09:32:03 +01003088
James Conroy6b965822018-11-01 11:33:09 +00003089 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01003090 1.0f, 255.0f,
James Conroy6b965822018-11-01 11:33:09 +00003091 200.0f, 250.0f,
3092
3093 250.0f, 200.0f,
3094 250.0f, 1.0f
3095 });
James Conroy074f3712018-10-03 09:32:03 +01003096
    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel. Thus, for an input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
    // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
    // which we would expect if projecting the centre).
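    //
    // Concretely: the scale factor is inputSize / outputSize = 2 in both axes, so
    // output texel (0,0) projects to input (0,0) and the interpolation weights fall
    // entirely on that sample, giving 1.0f for the first channel and 250.0f for the second.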
3102
3103 std::vector<float> outputData({
3104 1.0f,
3105
3106 250.0f
3107 });
3108
3109 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
3110 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
3111 {
3112 std::vector<float> tmp(inputData.size());
3113 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
3114 inputData = tmp;
3115
3116 std::vector<float> tmp1(outputData.size());
3117 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
3118 outputData = tmp1;
3119 }
3120
3121 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
3122
James Conroy074f3712018-10-03 09:32:03 +01003123 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00003124 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
James Conroy074f3712018-10-03 09:32:03 +01003125
3126 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3127 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3128
3129 armnn::ResizeBilinearQueueDescriptor descriptor;
3130 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003131 armnn::WorkloadInfo info;
3132 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3133 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3134
3135 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3136
3137 inputHandle->Allocate();
3138 outputHandle->Allocate();
3139 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3140
3141 workload->Execute();
3142
3143 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3144 return result;
3145}
3146
James Conroy6b965822018-11-01 11:33:09 +00003147LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory,
3148 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00003149{
James Conroy6b965822018-11-01 11:33:09 +00003150 const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
3151 const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00003152
James Conroy6b965822018-11-01 11:33:09 +00003153 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01003154 1.0f, 2.0f, 3.0f, 4.0f,
3155 2.0f, 3.0f, 4.0f, 5.0f,
3156 3.0f, 4.0f, 5.0f, 6.0f,
James Conroy6b965822018-11-01 11:33:09 +00003157 4.0f, 5.0f, 6.0f, 7.0f,
3158
3159 7.0f, 6.0f, 5.0f, 4.0f,
3160 6.0f, 5.0f, 4.0f, 3.0f,
3161 5.0f, 4.0f, 3.0f, 2.0f,
3162 4.0f, 3.0f, 2.0f, 1.0f
3163 });
3164
3165 std::vector<float> outputData({
3166 1.0f, 3.0f,
3167 3.0f, 5.0f,
3168
3169 7.0f, 5.0f,
3170 5.0f, 3.0f
3171 });
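    // With an integral scale factor of 2 and top-left projection, each output
    // texel samples the input exactly at (2y, 2x), so no interpolation occurs.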
3172
3173 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
3174 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
3175 {
3176 std::vector<float> tmp(inputData.size());
3177 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
3178 inputData = tmp;
3179
3180 std::vector<float> tmp1(outputData.size());
3181 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
3182 outputData = tmp1;
3183 }
3184
3185 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00003186
telsoa014fcda012018-03-09 14:13:49 +00003187 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00003188 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00003189
3190 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3191 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3192
3193 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003194 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003195 armnn::WorkloadInfo info;
3196 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3197 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3198
3199 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3200
3201 inputHandle->Allocate();
3202 outputHandle->Allocate();
3203 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3204
3205 workload->Execute();
3206
3207 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3208 return result;
3209}
3210
James Conroy6b965822018-11-01 11:33:09 +00003211LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory,
3212 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00003213{
James Conroy6b965822018-11-01 11:33:09 +00003214 const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
3215 const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 2, 3, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00003216
James Conroy6b965822018-11-01 11:33:09 +00003217 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01003218 1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
3219 13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
James Conroy6b965822018-11-01 11:33:09 +00003220 144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
3221
3222 987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
3223 89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
3224 8.0f, 5.0f, 3.0f, 2.0f, 1.0f
3225 });
3226
3227 std::vector<float> outputData({
3228 1.0f, 2.6666f, 6.00f,
3229 78.5f, 179.3333f, 401.00f,
3230
3231 987.0f, 454.6670f, 203.33f,
3232 48.5f, 22.3333f, 10.00f
3233 });
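    // Worked example for the first channel: the scale factors are 3/2 = 1.5
    // vertically and 5/3 ~= 1.667 horizontally. Output texel (1,1) projects to
    // input (1.5, 1.667); interpolating rows 1-2 across columns 1-2 gives
    // 0.5 * (21 + 0.667 * (34 - 21)) + 0.5 * (233 + 0.667 * (377 - 233)) ~= 179.3333.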
3234
3235 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
3236 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
3237 {
3238 std::vector<float> tmp(inputData.size());
3239 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
3240 inputData = tmp;
3241
3242 std::vector<float> tmp1(outputData.size());
3243 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
3244 outputData = tmp1;
3245 }
3246
3247 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00003248
3249 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00003250 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00003251
3252 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3253 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3254
3255 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003256 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003257 armnn::WorkloadInfo info;
3258 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3259 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3260
3261 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3262
3263 inputHandle->Allocate();
3264 outputHandle->Allocate();
3265 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3266
3267 workload->Execute();
3268
3269 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3270 return result;
3271}
3272
James Conroy6b965822018-11-01 11:33:09 +00003273LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory,
3274 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00003275{
James Conroy6b965822018-11-01 11:33:09 +00003276 const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 3, 2, dataLayout);
3277 const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00003278
James Conroy6b965822018-11-01 11:33:09 +00003279 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01003280 1.0f, 2.0f,
3281 13.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00003282 144.0f, 233.0f,
telsoa014fcda012018-03-09 14:13:49 +00003283
James Conroy6b965822018-11-01 11:33:09 +00003284 233.0f, 144.0f,
3285 21.0f, 13.0f,
3286 2.0f, 1.0f
3287 });
3288
3289 std::vector<float> outputData({
James Conroy074f3712018-10-03 09:32:03 +01003290 1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
3291 13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00003292 144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
3293
3294 233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
3295 21.0f, 17.8f, 14.6f, 13.0f, 13.0f,
3296 2.0f, 1.6f, 1.2f, 1.0f, 1.0f
3297 });
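    // Magnification: the horizontal scale factor is 2/5 = 0.4, so output column x
    // samples input column 0.4 * x; e.g. for x = 1, 1.0f + 0.4f * (2.0f - 1.0f) = 1.4f.
    // Samples that project past the last input column clamp to the edge, hence the
    // repeated values at the end of each row.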
3298
3299 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
3300 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
3301 {
3302 std::vector<float> tmp(inputData.size());
3303 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
3304 inputData = tmp;
3305
3306 std::vector<float> tmp1(outputData.size());
3307 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
3308 outputData = tmp1;
3309 }
3310
3311 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
3312
3313 LayerTestResult<float, 4> result(outputTensorInfo);
3314 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00003315
3316 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3317 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3318
3319 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003320 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003321 armnn::WorkloadInfo info;
3322 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3323 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3324
3325 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3326
3327 inputHandle->Allocate();
3328 outputHandle->Allocate();
3329 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3330
3331 workload->Execute();
3332
3333 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3334 return result;
3335}
3336
telsoa014fcda012018-03-09 14:13:49 +00003337LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workloadFactory)
3338{
3339 constexpr unsigned int width = 2;
3340 constexpr unsigned int height = 3;
3341
    const armnn::TensorInfo tensorInfo({ height, width }, armnn::DataType::Float32);
3344 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
3345 -10.0f, -5.0f,
3346 0.0f, 5.0f,
3347 10.0f, 10.0f
3348 }));
3349
3350 LayerTestResult<float, 2> ret(tensorInfo);
3351
3352 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
3353
3354 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
3355
3356 armnn::FakeQuantizationQueueDescriptor data;
3357 armnn::WorkloadInfo info;
3358
3359 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
3360 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
3361 float min = -10.f;
3362 float max = 10.f;
3363
3364 data.m_Parameters.m_Min = min;
3365 data.m_Parameters.m_Max = max;
3366
3367 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
3368 armnn::FakeQuantizationQueueDescriptor refData = data;
3369 armnn::WorkloadInfo refInfo = info;
3370 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
3371
3372 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
3373
3374 inputHandle->Allocate();
3375 outputHandle->Allocate();
3376
3377 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
3378
3379 workload->Execute();
3380
3381 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
3382
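    // The expected values follow the affine mapping q = (x - min) * 255 / (max - min),
    // so [-10, 10] spans [0, 255]; the exact integer chosen (e.g. 63.0f for -5.0f)
    // depends on the backend's rounding, and values at the clamp bounds saturate.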
3383 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
3384 0.0f, 63.0f,
3385 128.0f, 191.0f,
3386 255.0f, 255.0f
3387 }));
3388 return ret;
3389}
3390
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003391namespace
3392{
3393
3394LayerTestResult<float, 4> L2NormalizationTestImpl(armnn::IWorkloadFactory& workloadFactory,
3395 const armnn::TensorShape& inputOutputTensorShape,
3396 const std::vector<float>& inputValues,
3397 const std::vector<float>& expectedOutputValues,
3398 armnn::DataLayout dataLayout)
3399{
3400 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
3401 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
3402
3403 auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputValues));
3404
3405 LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(expectedOutputValues));
3407
3408 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3409 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3410
3411 armnn::L2NormalizationQueueDescriptor descriptor;
3412 descriptor.m_Parameters.m_DataLayout = dataLayout;
3413 armnn::WorkloadInfo info;
3414
3415 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3416 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3417
3418 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
3419
3420 inputHandle->Allocate();
3421 outputHandle->Allocate();
3422
3423 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
3424
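    // Acquire()/Release() bracket Execute() so that any memory owned by the
    // factory's memory manager is live while the workload runs.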
Aron Virginas-Tar60578952018-10-31 11:04:01 +00003425 workloadFactory.Acquire();
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003426 workload->Execute();
Aron Virginas-Tar60578952018-10-31 11:04:01 +00003427 workloadFactory.Release();
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003428
3429 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3430
3431 return result;
3432}
3433
3434float CalcInvL2Norm(std::initializer_list<float> elements)
3435{
3436 const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
3437 [](float acc, float element) { return acc + element * element; });
3438 return 1.0f / sqrtf(reduction);
3439}
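
// For example, CalcInvL2Norm({ 3.0f, 4.0f }) == 1.0f / sqrtf(9.0f + 16.0f) == 0.2f.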
3440
3441} // anonymous namespace
3442
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003443template<typename T>
3444LayerTestResult<T, 2> Pad2dTestCommon(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003445{
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003446 const armnn::TensorShape inputShape{ 3, 3 };
3447 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003448
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003449 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
3450 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003451
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003452 std::vector<T> inputValues(
3453 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003454 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003455 // Height (3) x Width (3)
3456 4, 8, 6,
3457 7, 4, 4,
3458 3, 2, 4
3459 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003460
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003461 std::vector<T> expectedOutputValues(
3462 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003463 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003464 0, 0, 0, 0, 0, 0, 0,
3465 0, 0, 0, 0, 0, 0, 0,
3466 0, 0, 4, 8, 6, 0, 0,
3467 0, 0, 7, 4, 4, 0, 0,
3468 0, 0, 3, 2, 4, 0, 0,
3469 0, 0, 0, 0, 0, 0, 0,
3470 0, 0, 0, 0, 0, 0, 0
3471 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003472
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003473 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003474
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003475 LayerTestResult<T, 2> result(outputTensorInfo);
3476 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003477
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003478 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3479 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003480
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003481 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003482
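    // Each pad-list entry is a (before, after) pair for the corresponding
    // dimension: { 2, 2 } on both axes grows the 3x3 input to the 7x7 output,
    // zero-filled outside the original data.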
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    padList.push_back(std::pair<unsigned int, unsigned int>(2, 2));
    padList.push_back(std::pair<unsigned int, unsigned int>(2, 2));

    descriptor.m_Parameters.m_PadList = padList;
3488 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003489
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003490 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3491 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003492
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003493 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003494
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003495 inputHandle->Allocate();
3496 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003497
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003498 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003499
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003500 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003501
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003502 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003503
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003504 return result;
3505}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003506
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003507template <typename T>
3508LayerTestResult<T, 3> Pad3dTestCommon(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003509{
3510 const armnn::TensorShape inputShape{ 2, 2, 2 };
3511 const armnn::TensorShape outputShape{ 3, 5, 6 };
3512
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003513 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
3514 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003515
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003516 std::vector<T> inputValues(
        QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003518 {
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003519 // Channel 0, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003520 0, 4,
3521 2, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003522
3523 // Channel 1, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003524 6, 1,
3525 5, 2
3526 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003527
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003528 std::vector<T> expectedOutputValues(
        QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003530 {
3531
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003532 0, 0, 0, 0, 0, 0,
3533 0, 0, 0, 0, 0, 0,
3534 0, 0, 0, 4, 0, 0,
3535 0, 0, 2, 5, 0, 0,
3536 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003537
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003538 0, 0, 0, 0, 0, 0,
3539 0, 0, 0, 0, 0, 0,
3540 0, 0, 6, 1, 0, 0,
3541 0, 0, 5, 2, 0, 0,
3542 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003543
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003544 0, 0, 0, 0, 0, 0,
3545 0, 0, 0, 0, 0, 0,
3546 0, 0, 0, 0, 0, 0,
3547 0, 0, 0, 0, 0, 0,
3548 0, 0, 0, 0, 0, 0
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003549
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003550 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003551
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003552 auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003553
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003554 LayerTestResult<T, 3> result(outputTensorInfo);
3555 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003556
3557 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3558 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3559
3560 armnn::PadQueueDescriptor descriptor;
3561
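    // (before, after) pads per dimension: { 2, 2, 2 } padded by (0,1), (2,1)
    // and (2,2) yields the { 3, 5, 6 } output declared above.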
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    padList.push_back(std::pair<unsigned int, unsigned int>(0, 1));
    padList.push_back(std::pair<unsigned int, unsigned int>(2, 1));
    padList.push_back(std::pair<unsigned int, unsigned int>(2, 2));

    descriptor.m_Parameters.m_PadList = padList;
3568 armnn::WorkloadInfo info;
3569
3570 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3571 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3572
3573 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
3574
3575 inputHandle->Allocate();
3576 outputHandle->Allocate();
3577
3578 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
3579
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003580 workload->Execute();
3581
3582 CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
3583
3584 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003585}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003586
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003587template <typename T>
3588LayerTestResult<T, 4> Pad4dTestCommon(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003589{
3590 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
3591 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
3592
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003593 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
3594 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003595
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003596 std::vector<T> inputValues(
        QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003598 {
3599 // Batch 0, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003600 0, 1,
3601 2, 3,
3602 4, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003603
3604 // Batch 0, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003605 6, 7,
3606 8, 9,
3607 10, 11,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003608
3609 // Batch 1, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003610 12, 13,
3611 14, 15,
3612 16, 17,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003613
3614 // Batch 1, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003615 18, 19,
3616 20, 21,
3617 22, 23
3618 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003619
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003620 std::vector<T> expectedOutputValues(
        QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003622 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003623 0, 0, 0, 0,
3624 0, 0, 0, 0,
3625 0, 0, 0, 0,
3626 0, 0, 0, 0,
3627 0, 0, 0, 0,
3628 0, 0, 0, 0,
3629 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003630
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003631 0, 0, 0, 0,
3632 0, 0, 0, 0,
3633 0, 0, 0, 0,
3634 0, 0, 0, 0,
3635 0, 0, 0, 0,
3636 0, 0, 0, 0,
3637 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003638
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003639 0, 0, 0, 0,
3640 0, 0, 0, 0,
3641 0, 0, 0, 0,
3642 0, 0, 0, 0,
3643 0, 0, 0, 0,
3644 0, 0, 0, 0,
3645 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003646
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003647 0, 0, 0, 0,
3648 0, 0, 0, 0,
3649 0, 0, 0, 0,
3650 0, 0, 0, 0,
3651 0, 0, 0, 0,
3652 0, 0, 0, 0,
3653 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003654
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003655 0, 0, 0, 0,
3656 0, 0, 0, 0,
3657 0, 0, 0, 0,
3658 0, 0, 0, 0,
3659 0, 0, 0, 0,
3660 0, 0, 0, 0,
3661 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003662
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003663 0, 0, 0, 0,
3664 0, 0, 0, 0,
3665 0, 0, 0, 0,
3666 0, 0, 0, 0,
3667 0, 0, 0, 0,
3668 0, 0, 0, 0,
3669 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003670
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003671 0, 0, 0, 0,
3672 0, 0, 0, 0,
3673 0, 0, 0, 0,
3674 0, 0, 0, 0,
3675 0, 0, 0, 0,
3676 0, 0, 0, 0,
3677 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003678
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003679 0, 0, 0, 0,
3680 0, 0, 0, 0,
3681 0, 0, 0, 0,
3682 0, 0, 1, 0,
3683 0, 2, 3, 0,
3684 0, 4, 5, 0,
3685 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003686
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003687 0, 0, 0, 0,
3688 0, 0, 0, 0,
3689 0, 0, 0, 0,
3690 0, 6, 7, 0,
3691 0, 8, 9, 0,
3692 0, 10, 11, 0,
3693 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003694
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003695 0, 0, 0, 0,
3696 0, 0, 0, 0,
3697 0, 0, 0, 0,
3698 0, 0, 0, 0,
3699 0, 0, 0, 0,
3700 0, 0, 0, 0,
3701 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003702
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003703 0, 0, 0, 0,
3704 0, 0, 0, 0,
3705 0, 0, 0, 0,
3706 0, 0, 0, 0,
3707 0, 0, 0, 0,
3708 0, 0, 0, 0,
3709 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003710
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003711 0, 0, 0, 0,
3712 0, 0, 0, 0,
3713 0, 0, 0, 0,
3714 0, 0, 0, 0,
3715 0, 0, 0, 0,
3716 0, 0, 0, 0,
3717 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003718
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003719 0, 0, 0, 0,
3720 0, 0, 0, 0,
3721 0, 0, 0, 0,
3722 0, 12, 13, 0,
3723 0, 14, 15, 0,
3724 0, 16, 17, 0,
3725 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003726
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003727 0, 0, 0, 0,
3728 0, 0, 0, 0,
3729 0, 0, 0, 0,
3730 0, 18, 19, 0,
3731 0, 20, 21, 0,
3732 0, 22, 23, 0,
3733 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003734
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003735 0, 0, 0, 0,
3736 0, 0, 0, 0,
3737 0, 0, 0, 0,
3738 0, 0, 0, 0,
3739 0, 0, 0, 0,
3740 0, 0, 0, 0,
3741 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003742
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003743 0, 0, 0, 0,
3744 0, 0, 0, 0,
3745 0, 0, 0, 0,
3746 0, 0, 0, 0,
3747 0, 0, 0, 0,
3748 0, 0, 0, 0,
3749 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003750
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003751 0, 0, 0, 0,
3752 0, 0, 0, 0,
3753 0, 0, 0, 0,
3754 0, 0, 0, 0,
3755 0, 0, 0, 0,
3756 0, 0, 0, 0,
3757 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003758
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003759 0, 0, 0, 0,
3760 0, 0, 0, 0,
3761 0, 0, 0, 0,
3762 0, 0, 0, 0,
3763 0, 0, 0, 0,
3764 0, 0, 0, 0,
3765 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003766
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003767 0, 0, 0, 0,
3768 0, 0, 0, 0,
3769 0, 0, 0, 0,
3770 0, 0, 0, 0,
3771 0, 0, 0, 0,
3772 0, 0, 0, 0,
3773 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003774
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003775 0, 0, 0, 0,
3776 0, 0, 0, 0,
3777 0, 0, 0, 0,
3778 0, 0, 0, 0,
3779 0, 0, 0, 0,
3780 0, 0, 0, 0,
3781 0, 0, 0, 0
3782 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003783
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003784 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003785
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003786 LayerTestResult<T, 4> result(outputTensorInfo);
3787 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003788
3789 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3790 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3791
3792 armnn::PadQueueDescriptor descriptor;
3793
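    // (before, after) pads per dimension: { 2, 2, 3, 2 } padded by (1,1), (2,1),
    // (3,1) and (1,1) yields the { 4, 5, 7, 4 } output declared above.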
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    padList.push_back(std::pair<unsigned int, unsigned int>(1, 1));
    padList.push_back(std::pair<unsigned int, unsigned int>(2, 1));
    padList.push_back(std::pair<unsigned int, unsigned int>(3, 1));
    padList.push_back(std::pair<unsigned int, unsigned int>(1, 1));

    descriptor.m_Parameters.m_PadList = padList;
3801 armnn::WorkloadInfo info;
3802
3803 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3804 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3805
3806 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
3807
3808 inputHandle->Allocate();
3809 outputHandle->Allocate();
3810
3811 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
3812
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003813 workload->Execute();
3814
3815 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3816
3817 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003818}
3819
3820LayerTestResult<uint8_t, 2> PadUint82dTest(armnn::IWorkloadFactory& workloadFactory)
3821{
3822 return Pad2dTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
3823}
3824
3825LayerTestResult<uint8_t, 3> PadUint83dTest(armnn::IWorkloadFactory& workloadFactory)
3826{
3827 return Pad3dTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
3828}
3829
3830LayerTestResult<uint8_t, 4> PadUint84dTest(armnn::IWorkloadFactory& workloadFactory)
3831{
3832 return Pad4dTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
3833}
3834
3835LayerTestResult<float, 2> PadFloat322dTest(armnn::IWorkloadFactory& workloadFactory)
3836{
3837 return Pad2dTestCommon<float>(workloadFactory, 0.0f, 0);
3838}
3839
3840LayerTestResult<float, 3> PadFloat323dTest(armnn::IWorkloadFactory& workloadFactory)
3841{
3842 return Pad3dTestCommon<float>(workloadFactory, 0.0f, 0);
3843}
3844
3845LayerTestResult<float, 4> PadFloat324dTest(armnn::IWorkloadFactory& workloadFactory)
3846{
3847 return Pad4dTestCommon<float>(workloadFactory, 0.0f, 0);
3848}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003849
telsoa014fcda012018-03-09 14:13:49 +00003850LayerTestResult<float, 4> L2Normalization1dTest(armnn::IWorkloadFactory& workloadFactory)
3851{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003852 // Width: 1
3853 // Height: 1
3854 // Channels: 10
3855 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00003856
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003857 const armnn::TensorShape inputOutputShape{ 1, 10, 1, 1 };
3858 std::vector<float> inputValues
3859 {
3860 // Batch 0, Channel 0, Height (1) x Width (1)
3861 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00003862
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003863 // Batch 0, Channel 1, Height (1) x Width (1)
3864 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00003865
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003866 // Batch 0, Channel 2, Height (1) x Width (1)
3867 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00003868
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003869 // Batch 0, Channel 3, Height (1) x Width (1)
3870 4.0f,
3871
3872 // Batch 0, Channel 4, Height (1) x Width (1)
3873 5.0f,
3874
3875 // Batch 0, Channel 5, Height (1) x Width (1)
3876 6.0f,
3877
3878 // Batch 0, Channel 6, Height (1) x Width (1)
3879 7.0f,
3880
3881 // Batch 0, Channel 7, Height (1) x Width (1)
3882 8.0f,
3883
3884 // Batch 0, Channel 8, Height (1) x Width (1)
3885 9.0f,
3886
3887 // Batch 0, Channel 9, Height (1) x Width (1)
3888 10.0f
3889 };
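    // All ten values sit at the same spatial position, so they are normalised
    // together: 1^2 + 2^2 + ... + 10^2 = 385, and 1 / sqrt(385) ~= 0.050964719.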
telsoa014fcda012018-03-09 14:13:49 +00003890 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003891 std::vector<float> expectedOutputValues
3892 {
3893 // Batch 0, Channel 0, Height (1) x Width (1)
telsoa014fcda012018-03-09 14:13:49 +00003894 1.0f * approxInvL2Norm,
3895 2.0f * approxInvL2Norm,
3896 3.0f * approxInvL2Norm,
3897 4.0f * approxInvL2Norm,
3898 5.0f * approxInvL2Norm,
3899 6.0f * approxInvL2Norm,
3900 7.0f * approxInvL2Norm,
3901 8.0f * approxInvL2Norm,
3902 9.0f * approxInvL2Norm,
3903 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003904 };
telsoa014fcda012018-03-09 14:13:49 +00003905
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003906 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
3907 inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
telsoa014fcda012018-03-09 14:13:49 +00003908}
3909
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003910LayerTestResult<float, 4> L2Normalization1dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00003911{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003912 // Width: 1
3913 // Height: 1
3914 // Channels: 10
3915 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00003916
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003917 const armnn::TensorShape inputOutputShape{ 1, 1, 1, 10 };
3918 std::vector<float> inputValues
3919 {
3920 // Batch 0, Height 0, Width (1) x Channel (10)
3921 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f
3922 };
3923 const float approxInvL2Norm = 0.050964719f;
3924 std::vector<float> expectedOutputValues
3925 {
3926 // Batch 0, Height 0, Width (1) x Channel (10)
3927 1.0f * approxInvL2Norm,
3928 2.0f * approxInvL2Norm,
3929 3.0f * approxInvL2Norm,
3930 4.0f * approxInvL2Norm,
3931 5.0f * approxInvL2Norm,
3932 6.0f * approxInvL2Norm,
3933 7.0f * approxInvL2Norm,
3934 8.0f * approxInvL2Norm,
3935 9.0f * approxInvL2Norm,
3936 10.0f * approxInvL2Norm
3937 };
3938
3939 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
3940 inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00003941}
3942
3943LayerTestResult<float, 4> L2Normalization2dTest(armnn::IWorkloadFactory& workloadFactory)
3944{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003945 // Width: 5
3946 // Height: 1
3947 // Channels: 2
3948 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00003949
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003950 const armnn::TensorShape inputOutputShape{ 1, 2, 1, 5 };
3951 std::vector<float> inputValues
3952 {
3953 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00003954 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00003955
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003956 // Batch 0, Channel 1, Height (1) x Width (5)
3957 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
3958 };
3959 std::vector<float> expectedOutputValues
3960 {
3961 // Batch 0, Channel 0, Height (1) x Width (5)
3962 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
3963 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
3964 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
3965 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00003966 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
3967
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003968 // Batch 0, Channel 1, Height (1) x Width (5)
3969 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
3970 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
3971 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
3972 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00003973 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003974 };
telsoa014fcda012018-03-09 14:13:49 +00003975
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003976 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
3977 inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
3978}
telsoa014fcda012018-03-09 14:13:49 +00003979
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003980LayerTestResult<float, 4> L2Normalization2dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
3981{
3982 // Width: 5
3983 // Height: 1
3984 // Channels: 2
3985 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00003986
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003987 const armnn::TensorShape inputOutputShape{ 1, 1, 5, 2 };
3988 std::vector<float> inputValues
3989 {
3990 // Batch 0, Height 0, Width (5) x Channel (2)
3991 1.0f, 2.0f,
3992 3.0f, 4.0f,
3993 5.0f, 6.0f,
3994 7.0f, 8.0f,
3995 9.0f, 10.0f
3996 };
3997 std::vector<float> expectedOutputValues
3998 {
3999 // Batch 0, Height 0, Width (5) x Channel (2)
4000 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
4001 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
4002 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
4003 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
4004 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
4005 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
4006 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
4007 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
4008 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
4009 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
4010 };
telsoa014fcda012018-03-09 14:13:49 +00004011
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004012 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4013 inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00004014}
4015
4016LayerTestResult<float, 4> L2Normalization3dTest(armnn::IWorkloadFactory& workloadFactory)
4017{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004018 // Width: 3
4019 // Height: 4
4020 // Channels: 2
4021 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00004022
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004023 const armnn::TensorShape inputOutputShape{ 1, 2, 4, 3 };
4024 std::vector<float> inputValues
4025 {
4026 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004027 119.0f, 21.0f, 150.0f,
4028 149.0f, 32.0f, 179.0f,
4029 15.0f, 227.0f, 141.0f,
4030 147.0f, 199.0f, 220.0f,
4031
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004032 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004033 110.0f, 140.0f, 73.0f,
4034 211.0f, 212.0f, 89.0f,
4035 24.0f, 138.0f, 188.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004036 162.0f, 12.0f, 161.0f
4037 };
4038 std::vector<float> expectedOutputValues
4039 {
4040 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004041 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4042 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4043 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4044 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4045 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4046 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4047 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4048 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4049 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4050 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4051 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
4052 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
4053
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004054 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004055 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4056 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4057 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4058 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4059 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4060 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4061 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4062 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4063 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4064 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4065 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004066 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
4067 };
telsoa014fcda012018-03-09 14:13:49 +00004068
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004069 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4070 inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
4071}
telsoa014fcda012018-03-09 14:13:49 +00004072
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004073LayerTestResult<float, 4> L2Normalization3dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
4074{
4075 // Width: 3
4076 // Height: 4
4077 // Channels: 2
4078 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00004079
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004080 const armnn::TensorShape inputOutputShape{ 1, 4, 3, 2 };
4081 std::vector<float> inputValues
4082 {
4083 // Batch 0, Height 0, Width (3) x Channel (2)
4084 119.0f, 110.0f,
4085 21.0f, 140.0f,
4086 150.0f, 73.0f,
telsoa014fcda012018-03-09 14:13:49 +00004087
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004088 // Batch 0, Height 1, Width (3) x Channel (2)
4089 149.0f, 211.0f,
4090 32.0f, 212.0f,
4091 179.0f, 89.0f,
telsoa014fcda012018-03-09 14:13:49 +00004092
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004093 // Batch 0, Height 2, Width (3) x Channel (2)
4094 15.0f, 24.0f,
4095 227.0f, 138.0f,
4096 141.0f, 188.0f,
telsoa014fcda012018-03-09 14:13:49 +00004097
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004098 // Batch 0, Height 3, Width (3) x Channel (2)
4099 147.0f, 162.0f,
4100 199.0f, 12.0f,
4101 220.0f, 161.0f
4102 };
4103 std::vector<float> expectedOutputValues
4104 {
4105 // Batch 0, Height 0, Width (3) x Channel (2)
4106 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4107 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4108 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4109 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4110 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4111 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4112
4113 // Batch 0, Height 1, Width (3) x Channel (2)
4114 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4115 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4116 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4117 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4118 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4119 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4120
4121 // Batch 0, Height 2, Width (3) x Channel (2)
4122 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4123 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4124 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4125 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4126 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4127 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4128
4129 // Batch 0, Height 3, Width (3) x Channel (2)
4130 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4131 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4132 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
4133 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
4134 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
4135 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
4136 };
4137
4138 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4139 inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00004140}
4141
4142LayerTestResult<float, 4> L2Normalization4dTest(armnn::IWorkloadFactory& workloadFactory)
4143{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004144 // Width: 3
4145 // Height: 4
4146 // Channels: 3
4147 // BatchSize: 2
telsoa014fcda012018-03-09 14:13:49 +00004148
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004149 const armnn::TensorShape inputOutputShape{ 2, 3, 4, 3 };
4150 std::vector<float> inputValues
4151 {
4152 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004153 235.0f, 46.0f, 178.0f,
4154 100.0f, 123.0f, 19.0f,
4155 172.0f, 74.0f, 250.0f,
4156 6.0f, 195.0f, 80.0f,
4157
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004158 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004159 113.0f, 95.0f, 202.0f,
4160 77.0f, 114.0f, 71.0f,
4161 122.0f, 246.0f, 166.0f,
4162 82.0f, 28.0f, 37.0f,
4163
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004164 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004165 56.0f, 170.0f, 162.0f,
4166 194.0f, 89.0f, 254.0f,
4167 12.0f, 209.0f, 200.0f,
4168 1.0f, 64.0f, 54.0f,
4169
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004170 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004171 67.0f, 90.0f, 49.0f,
4172 7.0f, 163.0f, 18.0f,
4173 25.0f, 117.0f, 103.0f,
4174 247.0f, 59.0f, 189.0f,
4175
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004176 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004177 239.0f, 104.0f, 199.0f,
4178 17.0f, 124.0f, 153.0f,
4179 222.0f, 217.0f, 75.0f,
4180 32.0f, 126.0f, 21.0f,
4181
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004182 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004183 97.0f, 145.0f, 215.0f,
4184 115.0f, 116.0f, 238.0f,
4185 226.0f, 16.0f, 132.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004186 92.0f, 125.0f, 88.0f
4187 };
4188 std::vector<float> expectedOutputValues
4189 {
4190 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004191 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4192 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
4194 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4195 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4196 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4197 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4198 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4199 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4200 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4201 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4202 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4203
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004204 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004205 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4206 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
4208 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4209 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4210 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4211 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4212 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4213 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4214 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4215 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4216 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4217
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004218 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004219 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4220 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
4222 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4223 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4224 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4225 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4226 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4227 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4228 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4229 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4230 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4231
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004232 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004233 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4234 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4235 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4236 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4237 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4238 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4239 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4240 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4241 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4242 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4243 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4244 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
4245
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004246 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004247 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4248 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4249 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4250 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4251 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4252 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4253 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4254 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4255 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4256 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4257 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4258 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
4259
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004260 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004261 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4262 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4263 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4264 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4265 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4266 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4267 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4268 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4269 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4270 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4271 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004272 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
4273 };
telsoa014fcda012018-03-09 14:13:49 +00004274
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004275 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4276 inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
4277}
telsoa014fcda012018-03-09 14:13:49 +00004278
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004279LayerTestResult<float, 4> L2Normalization4dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
4280{
4281 // Width: 3
4282 // Height: 4
4283 // Channels: 3
4284 // BatchSize: 2
telsoa014fcda012018-03-09 14:13:49 +00004285
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004286 const armnn::TensorShape inputOutputShape{ 2, 4, 3, 3 };
4287 std::vector<float> inputValues
4288 {
4289 // Batch 0, Height 0, Width (3) x Channel (3)
4290 235.0f, 113.0f, 56.0f,
4291 46.0f, 95.0f, 170.0f,
4292 178.0f, 202.0f, 162.0f,
telsoa014fcda012018-03-09 14:13:49 +00004293
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004294 // Batch 0, Height 1, Width (3) x Channel (3)
4295 100.0f, 77.0f, 194.0f,
4296 123.0f, 114.0f, 89.0f,
4297 19.0f, 71.0f, 254.0f,
telsoa014fcda012018-03-09 14:13:49 +00004298
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004299 // Batch 0, Height 2, Width (3) x Channel (3)
4300 172.0f, 122.0f, 12.0f,
4301 74.0f, 246.0f, 209.0f,
4302 250.0f, 166.0f, 200.0f,
telsoa014fcda012018-03-09 14:13:49 +00004303
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004304 // Batch 0, Height 3, Width (3) x Channel (3)
4305 6.0f, 82.0f, 1.0f,
4306 195.0f, 28.0f, 64.0f,
4307 80.0f, 37.0f, 54.0f,
4308
4309 // Batch 1, Height 0, Width (3) x Channel (3)
4310 67.0f, 239.0f, 97.0f,
4311 90.0f, 104.0f, 145.0f,
4312 49.0f, 199.0f, 215.0f,
4313
4314 // Batch 1, Height 1, Width (3) x Channel (3)
4315 7.0f, 17.0f, 115.0f,
4316 163.0f, 124.0f, 116.0f,
4317 18.0f, 153.0f, 238.0f,
4318
4319 // Batch 1, Height 2, Width (3) x Channel (3)
4320 25.0f, 222.0f, 226.0f,
4321 117.0f, 217.0f, 16.0f,
4322 103.0f, 75.0f, 132.0f,
4323
4324 // Batch 1, Height 3, Width (3) x Channel (3)
4325 247.0f, 32.0f, 92.0f,
4326 59.0f, 126.0f, 125.0f,
4327 189.0f, 21.0f, 88.0f
4328 };
4329 std::vector<float> expectedOutputValues
4330 {
4331 // Batch 0, Height 0, Width (3) x Channel (3)
4332 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4333 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4334 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4335 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4336 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4337 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
4341
4342 // Batch 0, Height 1, Width (3) x Channel (3)
4343 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4344 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4345 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4346 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4347 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4348 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4349 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4350 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4351 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4352
4353 // Batch 0, Height 2, Width (3) x Channel (3)
4354 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4355 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4356 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4357 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4358 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4359 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4360 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4361 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4362 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4363
4364 // Batch 0, Height 3, Width (3) x Channel (3)
4365 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4366 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4367 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4368 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4369 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4370 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4371 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4372 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4373 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4374
4375 // Batch 1, Height 0, Width (3) x Channel (3)
4376 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4377 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4378 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4379 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4380 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4381 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4382 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4383 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4384 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4385
4386 // Batch 1, Height 1, Width (3) x Channel (3)
4387 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4388 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4389 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4390 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4391 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4392 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4393 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4394 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4395 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4396
4397 // Batch 1, Height 2, Width (3) x Channel (3)
4398 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4399 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4400 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4401 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4402 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4403 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4404 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4405 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4406 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4407
4408 // Batch 1, Height 3, Width (3) x Channel (3)
4409 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4410 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4411 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4412 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4413 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4414 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4415 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
4416 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
4417 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
4418 };
4419
4420 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4421 inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00004422}
4423
4424template <typename T>
4425LayerTestResult<T, 4> ConstantTestImpl(armnn::IWorkloadFactory& workloadFactory,
4426 float qScale,
4427 int32_t qOffset)
4428{
4429 constexpr unsigned int inputWidth = 3;
4430 constexpr unsigned int inputHeight = 4;
4431 constexpr unsigned int inputChannels = 3;
4432 constexpr unsigned int inputBatchSize = 2;
4433
4434 constexpr unsigned int outputWidth = inputWidth;
4435 constexpr unsigned int outputHeight = inputHeight;
4436 constexpr unsigned int outputChannels = inputChannels;
4437 constexpr unsigned int outputBatchSize = inputBatchSize;
4438
4439 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
4440 armnn::GetDataType<T>());
4441
4442 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
4443 armnn::GetDataType<T>());
4444
4445 // Set quantization parameters if the requested type is a quantized type.
4446 if(armnn::IsQuantizedType<T>())
4447 {
4448 inputTensorInfo.SetQuantizationScale(qScale);
4449 inputTensorInfo.SetQuantizationOffset(qOffset);
4450 outputTensorInfo.SetQuantizationScale(qScale);
4451 outputTensorInfo.SetQuantizationOffset(qOffset);
4452 }
4453
4454 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
4455 QuantizedVector<T>(qScale, qOffset, {
4456 // Batch 0, Channel 0
4457 235.0f, 46.0f, 178.0f,
4458 100.0f, 123.0f, 19.0f,
4459 172.0f, 74.0f, 250.0f,
4460 6.0f, 195.0f, 80.0f,
4461
4462 // Batch 0, Channel 1
4463 113.0f, 95.0f, 202.0f,
4464 77.0f, 114.0f, 71.0f,
4465 122.0f, 246.0f, 166.0f,
4466 82.0f, 28.0f, 37.0f,
4467
4468 // Batch 0, Channel 2
4469 56.0f, 170.0f, 162.0f,
4470 194.0f, 89.0f, 254.0f,
4471 12.0f, 209.0f, 200.0f,
4472 1.0f, 64.0f, 54.0f,
4473
4474 // Batch 1, Channel 0
4475 67.0f, 90.0f, 49.0f,
4476 7.0f, 163.0f, 18.0f,
4477 25.0f, 117.0f, 103.0f,
4478 247.0f, 59.0f, 189.0f,
4479
4480 // Batch 1, Channel 1
4481 239.0f, 104.0f, 199.0f,
4482 17.0f, 124.0f, 153.0f,
4483 222.0f, 217.0f, 75.0f,
4484 32.0f, 126.0f, 21.0f,
4485
4486 // Batch 1, Channel 2
4487 97.0f, 145.0f, 215.0f,
4488 115.0f, 116.0f, 238.0f,
4489 226.0f, 16.0f, 132.0f,
4490 92.0f, 125.0f, 88.0f,
4491 })));
4492
4493 LayerTestResult<T, 4> result(outputTensorInfo);
4494 result.outputExpected = input;
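
    // The Constant workload has no inputs: it simply emits the tensor stored in its descriptor,
    // so the expected output is identical to the data set up above.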

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;

    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);

    outputHandle->Allocate();

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ConstantTest(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> ConstantTestUint8(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<uint8_t, 3> MergerUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the merger operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; // Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; // Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
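
    // The two view origins tile the output along dimension 0 (channels): input1's two channels
    // start at channel 0 and input2's single channel starts at channel 2, together covering the
    // three output channels without overlap.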

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<uint8_t, 4> AdditionUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 1;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    const float scale = 7.0f;
    const int32_t offset = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    const unsigned int shape[] = { batchSize, channels, height, width };
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);

    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

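    // With the QAsymm8 scheme, real = scale * (quantized - offset); here real = 7 * (q - 3), so
    // e.g. the first element of input1, 63, dequantizes to 7 * (63 - 3) = 420. The sums are
    // requantized with the same parameters (e.g. 420 + 126 = 546 -> 546 / 7 + 3 = 81),
    // saturating at 255 for the results marked '(clamped)' below.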
    // See dequantized values to the right.
    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
    {
        63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
        203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
    }));

    // See dequantized values to the right.
    auto input2 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
    {
        21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
        126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
    }));

    // See dequantized values to the right.
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>(
    {
        81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
        255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

namespace
{
LayerTestResult<uint8_t, 4> MultiplicationUint8TestHelper(armnn::IWorkloadFactory& workloadFactory,
                                                          const unsigned int shape0[4],
                                                          const std::vector<uint8_t>& values0,
                                                          float scale0,
                                                          int32_t offset0,
                                                          const unsigned int shape1[4],
                                                          const std::vector<uint8_t>& values1,
                                                          float scale1,
                                                          int32_t offset1,
                                                          const unsigned int outShape[4],
                                                          const std::vector<uint8_t>& outValues,
                                                          float outScale,
                                                          int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1(4, shape1, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo outputTensorInfo(4, outShape, armnn::DataType::QuantisedAsymm8);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<uint8_t, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, values1);

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<uint8_t, 4> MultiplicationUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 1;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;
    const unsigned int shape[] = { batchSize, channels, height, width };

    // See dequantized values to the right.
    std::vector<uint8_t> input0({
        62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
        188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
    });

    // See dequantized values to the right.
    std::vector<uint8_t> input1({
        126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
        48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
    });

    // See dequantized values to the right.
    std::vector<uint8_t> output(
    {
        64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
        77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
    });

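    // Requantization follows q = round(real / outScale) + outOffset; with outScale = 1366.255 and
    // outOffset = -5, 93696 requantizes to 64, while results falling outside [0, 255] saturate:
    // 6096 maps below zero and clamps to 0, and 379620 clamps to 255.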
    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape,
                                         input0,
                                         4.0f,
                                         1,
                                         shape,
                                         input1,
                                         3.0f,
                                         -2,
                                         shape,
                                         output,
                                         1366.255f, // Scale/offset chosen to have output values out of range.
                                         -5);
}

LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<uint8_t> input0({
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12
    });

    std::vector<uint8_t> input1({2});

    std::vector<uint8_t> output({
        2, 4, 6, 8, 10, 12,
        14, 16, 18, 20, 22, 24
    });

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape0,
                                         input0,
                                         1.0f,
                                         0,
                                         shape1,
                                         input1,
                                         1.0f,
                                         0,
                                         shape0,
                                         output,
                                         1.0f,
                                         0);
}

LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 3 };

    std::vector<uint8_t> input0({
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12
    });

    std::vector<uint8_t> input1({1, 2, 3});

    std::vector<uint8_t> output({
        1, 4, 9, 4, 10, 18,
        7, 16, 27, 10, 22, 36
    });

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape0,
                                         input0,
                                         1.0f,
                                         0,
                                         shape1,
                                         input1,
                                         1.0f,
                                         0,
                                         shape0,
                                         output,
                                         1.0f,
                                         0);
}

namespace
{
template <typename T>
LayerTestResult<T, 4> SubtractionTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                            const unsigned int shape0[4],
                                            const std::vector<T>& values0,
                                            float scale0,
                                            int32_t offset0,
                                            const unsigned int shape1[4],
                                            const std::vector<T>& values1,
                                            float scale1,
                                            int32_t offset1,
                                            const unsigned int outShape[4],
                                            const std::vector<T>& outValues,
                                            float outScale,
                                            int32_t outOffset)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<uint8_t, 4> SubtractionUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 2 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 1, 2, 1, 2 });
    std::vector<uint8_t> output({ 3, 3, 5, 5 });

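    // Dequantized with scale 0.5 and offset 2, input0 is { 4, 5, 6, 7 }; input1 dequantizes to
    // { 1, 2, 1, 2 }, so the differences { 3, 3, 5, 5 } are stored unchanged by the output's
    // scale 1 / offset 0 parameters.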
    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 0.5f, 2,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 2 });
    std::vector<uint8_t> output({ 5, 6, 7, 8 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 0.5f, 2,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 3);
}

LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 1 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 2, 1 });
    std::vector<uint8_t> output({ 8, 11, 12, 15 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 2 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 1, -1, 0, 2 });
    std::vector<float> output({ 0, 3, 3, 2 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 10 });
    std::vector<float> output({ -9, -8, -7, -6 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 2 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 10, -5 });
    std::vector<float> output({ -9, 7, -7, 9 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.1567f);
    inputTensorInfo.SetQuantizationOffset(1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.1567f);
    outputTensorInfo.SetQuantizationOffset(1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 255,
        200, 250
    }));

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
    // the centre).
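    // A minimal sketch of that projection, assuming scale = inputDim / outputDim (2 on both axes here):
    //     inX = outX * scaleX;    // 0 * 2 = 0
    //     inY = outY * scaleY;    // 0 * 2 = 0
    // so the single output texel samples input (0, 0), whose quantized value is 1.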
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(3.141592f);
    inputTensorInfo.SetQuantizationOffset(3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(3.141592f);
    outputTensorInfo.SetQuantizationOffset(3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3,
        3, 5
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 2;
    constexpr unsigned int outputHeight = 1;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, // 3.0, 4.5, 6.0
        5, 8, 13 // 9.0, 13.5, 21.0
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3 // 3.0, 5.25
    }));
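
    // Worked example for the second output texel: scaleX = inputWidth / outputWidth = 1.5, so
    // output x = 1 projects to input x = 1.5, halfway between the dequantized 4.5 and 6.0,
    // giving 5.25; requantized, 5.25 / 1.5 - 1 = 2.5, which is stored as 3.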

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 3;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 5;
    constexpr unsigned int outputHeight = 3;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.010765f);
    inputTensorInfo.SetQuantizationOffset(7);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.010132f);
    outputTensorInfo.SetQuantizationOffset(-18);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        24, 228, // 0.183005, 2.379065,
        105, 128, // 1.05497, 1.302565
        230, 71 // 2.400595, 0.68896
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        0, 87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
        86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
        219, 151, 84, 50, 50 // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
    }));
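
    // Worked example for row 0, output x = 1: scaleX = inputWidth / outputWidth = 0.4, so it
    // projects to input x = 0.4 and interpolates 0.183005 + 0.4 * (2.379065 - 0.183005)
    // ~= 1.061429; requantized with the output parameters, 1.061429 / 0.010132 - 18 ~= 87.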

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> BatchNormTest(armnn::IWorkloadFactory& workloadFactory)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

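    // BatchNormTestImpl supplies the mean, variance, beta and gamma constants used here; the
    // expected values assume the usual formula out = gamma * (x - mean) / sqrt(variance + eps) + beta,
    // which leaves channel 0 unchanged and maps channel 1 through x / 3 + 8 / 3.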
    return BatchNormTestImpl<float>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
                                    0.f, 0, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> BatchNormNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<float>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
                                    0.f, 0, armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> BatchNormUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<uint8_t>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
                                      1.f/20.f, 50, armnn::DataLayout::NCHW);
}

LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<uint8_t>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
                                      1.f/20.f, 50, armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> ConstantUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<uint8_t>(workloadFactory, 2e-6f, 1);
}

LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation1dTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim2TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}
5536
5537LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
5538{
5539 return Concatenation3dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5540}
5541
5542LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
5543{
5544 return Concatenation3dDim2DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5545}
5546
5547LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
5548 bool forceNoPadding)
5549{
5550 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
5551}
5552
5553LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory& workloadFactory,
5554 bool forceNoPadding)
5555{
5556 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<uint8_t>(workloadFactory, forceNoPadding, 3.0f, -5);
5557}
5558
5559LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory& workloadFactory,
5560 bool forceNoPadding)
5561{
5562 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<float>(workloadFactory, forceNoPadding);
5563}
5564
5565LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory& workloadFactory,
5566 bool forceNoPadding)
5567{
5568 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<uint8_t>(workloadFactory, forceNoPadding, 0.1f, 128);
5569}
5570
James Conroy45a9b772018-10-31 11:47:53 +00005571LayerTestResult<float, 4> SimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory,
5572 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005573{
James Conroy45a9b772018-10-31 11:47:53 +00005574 return SimpleMaxPooling2dTestCommon<float>(workloadFactory, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00005575}
5576
James Conroy45a9b772018-10-31 11:47:53 +00005577LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
5578 const armnn::DataLayoutIndexed& dataLayout)
Francis Murtagh043d0d02018-10-05 14:08:48 +01005579{
James Conroy45a9b772018-10-31 11:47:53 +00005580 return SimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, dataLayout);
Francis Murtagh043d0d02018-10-05 14:08:48 +01005581}
5582
James Conroy45a9b772018-10-31 11:47:53 +00005583LayerTestResult<float, 4> SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory,
5584 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005585{
James Conroy45a9b772018-10-31 11:47:53 +00005586 return SimpleAveragePooling2dTestCommon<float>(workloadFactory, dataLayout);
James Conroy69482272018-10-19 10:41:35 +01005587}
5588
James Conroy45a9b772018-10-31 11:47:53 +00005589LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
5590 const armnn::DataLayoutIndexed& dataLayout)
James Conroy69482272018-10-19 10:41:35 +01005591{
James Conroy45a9b772018-10-31 11:47:53 +00005592 return SimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00005593}
5594
surmeh01bceff2f2018-03-29 16:29:27 +01005595LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
5596 bool forceNoPadding)
5597{
5598 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
5599}
5600
telsoa014fcda012018-03-09 14:13:49 +00005601LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
5602{
5603 return LargeTensorsAveragePooling2dTestCommon<float>(workloadFactory);
5604}
5605
5606LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
5607{
5608 return LargeTensorsAveragePooling2dTestCommon<uint8_t>(workloadFactory, 0.5, -1);
5609}
5610
James Conroy45a9b772018-10-31 11:47:53 +00005611LayerTestResult<float, 4> SimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory,
5612 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005613{
James Conroy45a9b772018-10-31 11:47:53 +00005614 return SimpleL2Pooling2dTestCommon<float>(workloadFactory, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00005615}
5616
James Conroy45a9b772018-10-31 11:47:53 +00005617LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
5618 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005619{
James Conroy45a9b772018-10-31 11:47:53 +00005620 return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00005621}
5622
5623LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory& workloadFactory)
5624{
5625 return L2Pooling2dSize3Stride1TestCommon<float>(workloadFactory);
5626}
5627
5628LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5629{
5630 return L2Pooling2dSize3Stride1TestCommon<uint8_t>(workloadFactory);
5631}
5632
5633LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory& workloadFactory)
5634{
5635 return L2Pooling2dSize3Stride3TestCommon<float>(workloadFactory);
5636}
5637
5638LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5639{
5640 return L2Pooling2dSize3Stride3TestCommon<uint8_t>(workloadFactory);
5641}
5642
5643LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory& workloadFactory)
5644{
5645 return L2Pooling2dSize3Stride4TestCommon<float>(workloadFactory);
5646}
5647
5648LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5649{
5650 return L2Pooling2dSize3Stride4TestCommon<uint8_t>(workloadFactory);
5651}
5652
5653LayerTestResult<float, 4> L2Pooling2dSize7Test(armnn::IWorkloadFactory& workloadFactory)
5654{
5655 return L2Pooling2dSize7TestCommon<float>(workloadFactory);
5656}
5657
5658LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5659{
5660 return L2Pooling2dSize7TestCommon<uint8_t>(workloadFactory);
5661}
5662
5663LayerTestResult<float, 4> L2Pooling2dSize9Test(armnn::IWorkloadFactory& workloadFactory)
5664{
5665 return L2Pooling2dSize9TestCommon<float>(workloadFactory);
5666}
5667
5668LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5669{
5670 return L2Pooling2dSize9TestCommon<uint8_t>(workloadFactory);
5671}
5672
5673LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
5674{
5675 return AsymmetricNonSquarePooling2dTestCommon<float>(workloadFactory);
5676}
5677
5678LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
5679{
5680 return AsymmetricNonSquarePooling2dTestCommon<uint8_t>(workloadFactory);
5681}
5682
5683LayerTestResult<float, 4> ComparePooling2dTest(armnn::IWorkloadFactory& workloadFactory,
5684 armnn::IWorkloadFactory& refWorkloadFactory,
5685 armnn::PoolingAlgorithm poolingType)
5686{
5687 return ComparePooling2dTestCommon<float>(workloadFactory, refWorkloadFactory, poolingType);
5688}
5689
5690LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
5691 armnn::IWorkloadFactory& refWorkloadFactory,
5692 armnn::PoolingAlgorithm poolingType)
5693{
5694 return ComparePooling2dTestCommon<uint8_t>(workloadFactory, refWorkloadFactory, poolingType, 0.1f, 128);
5695}
5696
5697LayerTestResult<float, 2> FullyConnectedLargeTest(armnn::IWorkloadFactory& workloadFactory,
5698 bool transposeWeights)
5699{
5700 return FullyConnectedLargeTestCommon<float>(workloadFactory, transposeWeights);
5701}
5702
5703LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory)
5704{
5705 return IgnorePaddingSimpleMaxPooling2dTestCommon<float>(workloadFactory);
5706}
5707
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, 1.0f, -5);
}

LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<uint8_t>(workloadFactory, 1.0f, -5);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> SimplePermuteFloat32Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimplePermuteFloat32TestCommon(workloadFactory);
}

LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimplePermuteUint8TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet1TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet2TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet3TestCommon(workloadFactory);
}

namespace
{

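// Builds and runs a Mean workload: wraps inputData/outputData in tensors of the given
// shapes (uint8 tensors are marked QuantisedAsymm8 with the supplied scale/offset,
// float tensors are Float32), reduces over the dimensions listed in 'axis' (all
// dimensions when 'axis' is empty), keeps them as size-1 dimensions when keepDims is
// set, and returns the actual and expected outputs for comparison.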
template <typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> MeanTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                             const unsigned int* inputShape,
                                             const std::vector<T>& inputData,
                                             const std::vector<unsigned int>& axis,
                                             bool keepDims,
                                             const unsigned int* outputShape,
                                             const std::vector<T>& outputData,
                                             float scale = 1.0f,
                                             int32_t offset = 0)
{
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MeanQueueDescriptor data;
    data.m_Parameters.m_Axis = axis;
    data.m_Parameters.m_KeepDims = keepDims;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workload->Execute();

    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());

    return result;
}

} // anonymous namespace

LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 3, 2 };
    const unsigned int outputShape[] = { 1 };

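    // An empty axis list reduces over every dimension: (1 + 1 + 2 + 2 + 3 + 3) / 6 = 2.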
    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2 });

    return MeanTestHelper<uint8_t, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
}

LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 2 };

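    // Reducing axis 2 (size 3) averages each column {1, 2, 3} to 6 / 3 = 2.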
    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2, 2 });

    return MeanTestHelper<uint8_t, 4, 3>(workloadFactory, inputShape, input, { 2 }, false, outputShape, output);
}

LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2, 2 });

    return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, { 2 }, true, outputShape, output);
}

LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 1, 3, 1, 1 };

    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
    std::vector<uint8_t> output({ 1, 3, 5 });

    return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, { 0, 3 }, true, outputShape, output);
}

LayerTestResult<uint8_t, 1> MeanVtsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 2 };

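    // Input and output share quantisation parameters (scale 0.8, offset 5), so the mean
    // can be taken directly on the raw uint8 values: reducing axes 0 and 1 averages 12
    // values per output element, e.g. (1 + 3 + 5 + ... + 23) / 12 = 12.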
    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
                                 24 });
    std::vector<uint8_t> output({ 12, 13 });

    return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, inputShape, input, { 0, 1 }, false, outputShape,
                                         output, 0.8f, 5);
}

LayerTestResult<float, 1> MeanFloatSimpleTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 3, 2 };
    const unsigned int outputShape[] = { 1 };

    std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
    std::vector<float> output({ 2.0f });

    return MeanTestHelper<float, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
}

LayerTestResult<float, 3> MeanFloatSimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 3, 1, 2 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
    std::vector<float> output({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });

    return MeanTestHelper<float, 4, 3>(workloadFactory, inputShape, input, { 0 }, false, outputShape, output);
}

LayerTestResult<float, 4> MeanFloatKeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };

    std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
    std::vector<float> output({ 2.0f, 2.0f });

    return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, { 2 }, true, outputShape, output);
}

LayerTestResult<float, 4> MeanFloatMultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 1, 3, 1, 1 };

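    // Reducing axes 0 and 3 with keepDims averages four values per remaining channel,
    // e.g. (1 + 2 + 1 + 2) / 4 = 1.5 for the first channel.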
    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
    std::vector<float> output({ 1.5f, 3.5f, 5.5f });

    return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, { 0, 3 }, true, outputShape, output);
}

LayerTestResult<float, 1> MeanVtsFloat1Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 2 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
                               15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
    std::vector<float> output({ 12.0f, 13.0f });

    return MeanTestHelper<float, 3, 1>(workloadFactory, inputShape, input, { 0, 1 }, false, outputShape, output);
}

LayerTestResult<float, 3> MeanVtsFloat2Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 1, 3, 1 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
                               15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
    std::vector<float> output({ 10.5f, 12.5f, 14.5f });

    return MeanTestHelper<float, 3, 3>(workloadFactory, inputShape, input, { 0, 2 }, true, outputShape, output);
}

LayerTestResult<float, 3> MeanVtsFloat3Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 2, 2, 1 };
    const unsigned int outputShape[] = { 1, 2, 1 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f });
    std::vector<float> output({ 1.5f, 3.5f });

    return MeanTestHelper<float, 4, 3>(workloadFactory, inputShape, input, { 2 }, false, outputShape, output);
}

LayerTestResult<float, 4> AdditionAfterMaxPoolTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Create the initial tensor:
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::GetDataType<float>());
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::GetDataType<float>());

    boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
                                                                    {1, 2, 3,
                                                                     4, 5, 6,
                                                                     7, 8, 9
                                                                    });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply a max pool with poolSize = 1x1 and stride = 2x2, which picks the corner elements:
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the max pool workload.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    // Create an addition with another tensor of the same size. This tensor holds
    // what a Conv2d over the initial tensor with a 2x2 kernel of ones and stride 1x1
    // would produce:
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::GetDataType<float>());
    armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::GetDataType<float>());

    boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
                                                                {12, 16,
                                                                 24, 28
                                                                });

    // Expected output tensor after MaxPool and Addition:
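    // {1, 3, 7, 9} + {12, 16, 24, 28} = {13, 19, 31, 37}.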
    LayerTestResult<float,4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
            {
                13, 19,
                31, 37
            }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the max pool and the new tensor.
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

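    // Execute the max pool first so its output is in poolingOutputHandle before the
    // addition consumes it as an input.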
    workload->Execute();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}