//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "LayerTests.hpp"

#include "test/TensorHelpers.hpp"
#include "TensorCopyUtils.hpp"
#include "Permute.hpp"

#include <boost/test/unit_test.hpp>
#include <boost/assert.hpp>

#include <armnn/LayerSupport.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <algorithm>
#include <boost/cast.hpp>

#include "WorkloadTestUtils.hpp"
#include "Conv2dTestImpl.hpp"
#include "BatchNormTestImpl.hpp"
#include "ActivationTestImpl.hpp"
#include "Pooling2dTestImpl.hpp"
#include "ReshapeTestImpl.hpp"
#include "FullyConnectedTestImpl.hpp"
#include "SpaceToBatchNdTestImpl.hpp"
#include "SplitterTestImpl.hpp"
#include "SoftmaxTestImpl.hpp"
#include "NormTestImpl.hpp"
#include "PermuteTestImpl.hpp"
#include "LstmTestImpl.hpp"
#include "ConvertFp16ToFp32TestImpl.hpp"
#include "ConvertFp32ToFp16TestImpl.hpp"

// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});

// 2-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias2({0, 2});

// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
template<typename T>
boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
{
    if (biasEnabled)
    {
        armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, armnn::GetDataType<T>());
        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
        return bias;
    }
    else
    {
        return boost::multi_array<T, 1>();
    }
}
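
// For reference, when T is a quantized type QuantizedVector maps each float value v to
// round(v / qScale) + qOffset, and passes values through unchanged for float. A minimal
// worked example, assuming the qScale = 0.5f / qOffset = 50 used by the uint8 tests below:
//
//     Bias2 = {0, 2}  ->  {50 + 0/0.5, 50 + 2/0.5}  =  {50, 54}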

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       float qScale,
                                                       int32_t qOffset,
                                                       bool biasEnabled,
                                                       const armnn::DataLayoutIndexed& layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x4 image.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout);
}
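
// Note: with stride 1 and no padding, the {1, 2, 4, 14} output shape above follows the usual
// "valid" convolution rule:
//
//     outHeight = inHeight - kernelHeight + 1 = 8 - 5 + 1 = 4
//     outWidth  = inWidth  - kernelWidth  + 1 = 16 - 3 + 1 = 14
//
// and the two output channels come from the 2-element kernel batch.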

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       float qScale,
                                                       int32_t qOffset,
                                                       bool biasEnabled,
                                                       const armnn::DataLayoutIndexed& layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout);
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                           float qScale,
                                                           int32_t qOffset,
                                                           bool biasEnabled,
                                                           armnn::DataLayout dataLayout)
{
    // Use a single-batch 1-channel 4x3 image in NHWC layout.
    armnn::TensorInfo inputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
    {
        1, 5, 2, 3,
        8, 7, 3, 6,
        3, 3, 9, 1
    });

    // Use 1 batch of a 1-channel 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
        4, 5, 6,
        0, 0, 0,
        3, 2, 1
    });

    // Expected output is 1 batch of a 1-channel 4x3 image.
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());

    const std::vector<float> outputData =
    {
        23, 41, 33, 21,
        44, 65, 76, 52,
        82, 85, 79, 42
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    return SimpleConvolution2dNhwcTestImpl<T>(workloadFactory,
                                              input,
                                              kernel,
                                              boost::multi_array<T, 1>(),
                                              expectedOutput,
                                              dataLayout,
                                              qScale,
                                              qOffset);
}
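
// For reference, NHWC tensors are indexed as {batch, height, width, channels}, so the
// {1, 3, 4, 1} shapes above describe a 3-row, 4-column, single-channel image. A minimal
// sketch of how the first expected element is produced, assuming same padding with stride 1
// (out-of-image taps read as zero), which is consistent with the expected values above:
//
//     out(0,0) = 4*0 + 5*0 + 6*0    // kernel row 0 overlaps the zero padding above the image
//              + 0*0 + 0*1 + 0*5    // kernel row 1 over image row 0
//              + 3*0 + 2*8 + 1*7    // kernel row 2 over image row 1
//              = 23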

LayerTestResult<float, 4> SimpleConvolution2d3x5Test(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled,
                                                     const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2d3x5TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, layout);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled,
                                                            const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2d3x5TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled, layout);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3Test(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled,
                                                     const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2d3x3TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, layout);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(armnn::IWorkloadFactory& workloadFactory,
                                                         bool biasEnabled)
{
    return SimpleConvolution2d3x3NhwcTestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled,
                                                            const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2d3x3TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled, layout);
}

template<typename T>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::DataLayoutIndexed& layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11, 21, 31,
            12, 22, 32,
            13, 23, 33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11, -21,
            -12, -22,
        })));

    // Expected output is 1 batch of a 1-channel 6x8 image.
    // Manually calculated like this:
    //[-11*0 -21*0 -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*11 ; -11*0  -21*0  -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ..]
    //[-11*0 -21*0 -12*0 -22*11 ; -11*0  -21*0  -12*11 -22*21 ; -11*0  -21*0  -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0  ..]
    //[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
    //[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
    //[-11*0 -21*13 -12*0 -22*0  ; -11*13 -21*23 -12*0  -22*0  ; -11*23 -21*33 -12*0  -22*0  ; -11*33 -21*0 -12*0  -22*0 ..]
    //[-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0 -12*0  -22*0 ..]
    //[..... ..... ..... .....   ; .....  .....  .....  .....  ; .....  .....  .....  .....  ; .....  ..... .....  ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
               0,     0,     0,    0,    0, 0,
            -242,  -594,  -934, -372,    0, 0,
            -495, -1190, -1850, -725,    0, 0,
            -538, -1256, -1916, -748,    0, 0,
            -273,  -626,  -946, -363,    0, 0,
               0,     0,     0,    0,    0, 0,
               0,     0,     0,    0,    0, 0,
               0,     0,     0,    0,    0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout,
                                          1,  // Padding left.
                                          2,  // Padding top.
                                          3,  // Padding right.
                                          4); // Padding bottom.
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                     const armnn::DataLayoutIndexed& layout,
                                                                     float qScale,
                                                                     int32_t qOffset)
{
    // Use a single-batch 1-channel 5x5 image as input.
    armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11, 21, 31, 41, 51,
            12, 22, 32, 42, 52,
            13, 23, 33, 43, 53,
            14, 24, 34, 44, 54,
            15, 25, 35, 45, 55,
        })));

    // Use 1 batch of a 1-channel 4x4 kernel.
    armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11, -21, -31, -41,
            -12, -22, -32, -42,
            -13, -23, -33, -43,
            -14, -24, -34, -44,
        })));

    // Expected output is 1 batch of a 1-channel 5x5 image.
    armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -7140, -10580, -13940,  -9300, -5230,
            -9590, -14120, -18520, -12290, -6860,
            -9980, -14560, -18960, -12560, -7000,
            -7518, -10904, -14144,  -9318, -5152,
            -5032,  -7256,  -9376,  -6142, -3368,
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout,
                                          1,  // Padding left.
                                          1,  // Padding top.
                                          2,  // Padding right.
                                          2); // Padding bottom.
}
template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                 float qScale,
                                                                 int32_t qOffset,
                                                                 bool biasEnabled,
                                                                 const armnn::DataLayoutIndexed& layout)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the python tensorflow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dAsymmetricTestImpl<T>(workloadFactory,
                                                       input,
                                                       kernel,
                                                       GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                                       expectedOutput,
                                                       qScale,
                                                       qOffset,
                                                       layout,
                                                       1,  // Padding left.
                                                       1,  // Padding top.
                                                       2,  // Padding right.
                                                       2,  // Padding bottom.
                                                       1,  // strideX
                                                       1); // strideY
}
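
// Note: for depthwise convolution the output channel count is inputChannels * depthMultiplier,
// so with a depth multiplier of 1 each of the two input channels above is filtered by its own
// 4x4 kernel plane to produce one output channel; unlike regular convolution, nothing is
// summed across channels.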

template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                           float qScale,
                                                           int32_t qOffset,
                                                           bool biasEnabled)
{
    armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
             0, 25,
             1, 26,
             2, 27,
             3, 28,
             4, 29,

             5, 30,
             6, 31,
             7, 32,
             8, 33,
             9, 34,

            10, 35,
            11, 36,
            12, 37,
            13, 38,
            14, 39,

            15, 40,
            16, 41,
            17, 42,
            18, 43,
            19, 44,

            20, 45,
            21, 46,
            22, 47,
            23, 48,
            24, 49
        })));

    armnn::TensorInfo kernelTensorInfo({ 1, 4, 4, 2 }, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 16,
            31, 15,
            30, 14,
            29, 13,

            28, 12,
            27, 11,
            26, 10,
            25,  9,

            24,  8,
            23,  7,
            22,  6,
            21,  5,

            20,  4,
            19,  3,
            18,  2,
            17,  1
        })));

    armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1550,
            1580, 2284,
            1850, 2362,
            1530, 1955,
            1117, 1428,

            2140, 2910,
            3108, 4206,
            3500, 4342,
            2842, 3528,
            2042, 2536,

            3580, 3390,
            5068, 4886,
            5460, 5022,
            4342, 4068,
            3062, 2916,

            3618, 3566,
            5072, 5056,
            5390, 5182,
            4248, 4133,
            2971, 2922,

            3074, 3100,
            4282, 4352,
            4510, 4452,
            3533, 3517,
            2457, 2465
        })));

    return DepthwiseConvolution2dNhwcTestImpl<T>(workloadFactory,
                                                 input,
                                                 kernel,
                                                 GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                                 expectedOutput,
                                                 qScale,
                                                 qOffset,
                                                 1,  // Padding left.
                                                 1,  // Padding top.
                                                 2,  // Padding right.
                                                 2,  // Padding bottom.
                                                 1,  // strideX
                                                 1); // strideY
}
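
// For reference, this NHWC variant uses the same values as
// DepthwiseConvolution2dAsymmetricTestCommon above, with the two channels interleaved per
// pixel rather than stored as separate planes; the expected outputs therefore match pairwise
// (e.g. 1062/1550 are the first elements of channel 0 and channel 1 in both tests).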

LayerTestResult<float, 4>
Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(armnn::IWorkloadFactory& workloadFactory,
                                                           const armnn::DataLayoutIndexed& layout)
{
    return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon<float>(workloadFactory, layout, 0.0f, 0);
}

LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(armnn::IWorkloadFactory& workloadFactory,
                                                             const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2dAsymmetricPaddingTestCommon<float>(workloadFactory, layout, 0.0f, 0);
}

LayerTestResult<float, 4> DepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled,
                                                     const armnn::DataLayoutIndexed& layout)
{
    return DepthwiseConvolution2dTestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled, layout);
}

LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(armnn::IWorkloadFactory& workloadFactory,
                                                              bool biasEnabled)
{
    return DepthwiseConvolution2dNhwcTestCommon<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(armnn::IWorkloadFactory& workloadFactory,
                                                              bool biasEnabled,
                                                              const armnn::DataLayoutIndexed& layout)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled, layout);
}

LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(armnn::IWorkloadFactory& workloadFactory,
                                                               bool biasEnabled,
                                                               const armnn::DataLayoutIndexed& layout)
{
    return DepthwiseConvolution2dAsymmetricTestCommon<float>(workloadFactory, 0.0f, 0, biasEnabled, layout);
}

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled,
                                                            const armnn::DataLayoutIndexed& layout)
{
    return DepthwiseConvolution2dTestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled, layout);
}

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                                     bool biasEnabled,
                                                                     const armnn::DataLayoutIndexed& layout)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled, layout);
}

LayerTestResult<float, 4> Convolution1dTest(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
{
    return Convolution1dTestImpl<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> Convolution1dUint8Test(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
{
    return Convolution1dTestImpl<uint8_t>(workloadFactory, 0.1f, 128, biasEnabled);
}

LayerTestResult<float, 4> CompareConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                   armnn::IWorkloadFactory& refWorkloadFactory)
{
    return CompareConvolution2dTestImpl<float>(workloadFactory, refWorkloadFactory);
}

template<typename T>
LayerTestResult<T, 4> CompareDepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                        armnn::IWorkloadFactory& refWorkloadFactory,
                                                        const armnn::DataLayoutIndexed& layout)
{
    return CompareDepthwiseConvolution2dTestImpl<T>(workloadFactory, refWorkloadFactory, layout);
}

template LayerTestResult<float, 4> CompareDepthwiseConvolution2dTest<float>(
    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&, const armnn::DataLayoutIndexed&);
template LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dTest<uint8_t>(
    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&, const armnn::DataLayoutIndexed&);

LayerTestResult<float, 4> SimpleNormalizationAcrossTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float, 4> SimpleNormalizationWithinTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float, 4> SimpleNormalizationAcrossNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationNhwcTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float, 2> SimpleSoftmaxTest(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    return SimpleSoftmaxTestImpl<float>(workloadFactory, beta);
}

LayerTestResult<uint8_t, 2> SimpleSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    return SimpleSoftmaxTestImpl<uint8_t>(workloadFactory, beta);
}

LayerTestResult<float, 4> CompareNormalizationTest(armnn::IWorkloadFactory& workloadFactory,
                                                   armnn::IWorkloadFactory& refWorkloadFactory,
                                                   armnn::NormalizationAlgorithmChannel normChannel,
                                                   armnn::NormalizationAlgorithmMethod normMethod)
{
    return CompareNormalizationTestImpl(workloadFactory, refWorkloadFactory, normChannel, normMethod);
}

LayerTestResult<float, 2> CompareSoftmaxTest(armnn::IWorkloadFactory& workloadFactory,
                                             armnn::IWorkloadFactory& refWorkloadFactory,
                                             float beta)
{
    return CompareSoftmaxTestImpl<float>(workloadFactory, refWorkloadFactory, beta);
}

LayerTestResult<uint8_t, 2> CompareSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                    armnn::IWorkloadFactory& refWorkloadFactory,
                                                    float beta)
{
    return CompareSoftmaxTestImpl<uint8_t>(workloadFactory, refWorkloadFactory, beta);
}

std::vector<LayerTestResult<float, 3>> SplitterTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SplitterTestCommon<float>(workloadFactory);
}

std::vector<LayerTestResult<uint8_t, 3>> SplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SplitterTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<float, 3> CopyViaSplitterTest(armnn::IWorkloadFactory& workloadFactory)
{
    return CopyViaSplitterTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return CopyViaSplitterTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({ 2, 2 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        { 2., 3., 3., 4. }));

    armnn::TensorInfo outputDesc({ 2, 4 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        { -0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
          -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f }));
    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        { 0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
          0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f }));

    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        { -0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
          -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
          -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
          0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
          -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
          0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
          0.02168f }));
    return LstmLayerFloat32NoCifgWithPeepholeWithProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({2, 2}, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        {2., 3., 3., 4.}));

    armnn::TensorInfo outputDesc({2, 4}, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
         -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
}
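
// For reference, the three variants above cover the standard LSTM options: CIFG ties the
// input gate to the complement of the forget gate, peephole connections let the gates read
// the cell state directly, and a projection layer maps the cell output to a different
// (typically smaller) output size, which is why the projection test produces a {2, 16}
// output from a {2, 5} input.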

LayerTestResult<float, 3> MergerTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float, 3> ret(outputTensorInfo);

    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,

            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,
        })
    );

    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
        {
            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; // Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
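
    // With these origins, input1 occupies channels 0-1 of the 3-channel output and input2
    // occupies channel 2, so the merger concatenates along the channel axis by copying each
    // input into its window (or by writing through a sub-tensor when the backend supports it).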

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<float, 4> AdditionTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 2;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
        {
            0.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            1.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 1.0f, 2.0f,

            0.0f, 0.0f, 1.0f,
            0.2f, 1.0f, 2.0f,
        }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
        {
            1.0f, 2.0f, 1.0f,
            0.0f, 1.0f, 2.0f,

            1.0f, 2.0f, -2.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 0.0f, -3.0f,

            0.0f, 0.0f, 1.0f,
            0.7f, 1.0f, 5.0f,
        }));

    LayerTestResult<float, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 4.0f, 2.0f,
            0.2f, 2.0f, 4.0f,

            2.0f, 4.0f, -1.0f,
            0.4f, 2.0f, 4.0f,

            0.0f, 4.0f, 2.0f,
            8.4f, 1.0f, -1.0f,

            0.0f, 0.0f, 2.0f,
            0.9f, 2.0f, 7.0f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template <typename T>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                float qScale,
                                                int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
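
// Note: the broadcast above follows the usual numpy-style rule: dimensions of size 1 are
// stretched to match the other operand, so the {1, 3, 2, 1} and {1, 1, 2, 3} inputs combine
// into a {1, 3, 2, 3} output.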

template <typename T>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                        float qScale,
                                                        int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f, 1.0f, 2.0f,
            3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f,
            9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
            6.5f, 7.5f, 8.5f,
            9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<float, 4> AdditionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcastTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcastTestImpl<uint8_t>(workloadFactory, 2.f, 0);
}

LayerTestResult<float, 4> AdditionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcast1ElementTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcast1ElementTestImpl<uint8_t>(workloadFactory, 0.1333333f, 128);
}

LayerTestResult<float, 4> CompareAdditionTest(armnn::IWorkloadFactory& workloadFactory,
                                              armnn::IWorkloadFactory& refWorkloadFactory)
{
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}

namespace {
template <typename T>
LayerTestResult<T, 4> DivisionTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                         const unsigned int shape0[4],
                                         const std::vector<T>& values0,
                                         float scale0,
                                         int32_t offset0,
                                         const unsigned int shape1[4],
                                         const std::vector<T>& values1,
                                         float scale1,
                                         int32_t offset1,
                                         const unsigned int outShape[4],
                                         const std::vector<T>& outValues,
                                         float outScale,
                                         int32_t outOffset)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DivisionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001330LayerTestResult<float,4> DivisionByZeroTest(armnn::IWorkloadFactory& workloadFactory)
1331{
1332 const unsigned int width = 2;
1333 const unsigned int height = 2;
1334 const unsigned int channelCount = 2;
1335 const unsigned int batchSize = 2;
1336
1337 unsigned int shape[] = { batchSize, channelCount, height, width };
1338
1339 std::vector<float> input0({
1340 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
1341 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
1342
1343 std::vector<float> input1({
1344 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
1345 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
1346
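    // Expected results follow IEEE 754 float semantics: dividing a non-zero
    // value by a signed zero yields an appropriately signed infinity, while
    // 0/0 yields NaN.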
1347 std::vector<float> output({
1348 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
1349 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
1350
David Beck5cd01f32018-09-12 16:00:08 +01001351 return DivisionTestHelper<float>(workloadFactory,
1352 shape, input0, 1.0f, 0,
1353 shape, input1, 1.0f, 0,
1354 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001355}
1356
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001357LayerTestResult<float,4> DivisionTest(armnn::IWorkloadFactory& workloadFactory)
1358{
1359 const unsigned int width = 2;
1360 const unsigned int height = 2;
1361 const unsigned int channelCount = 2;
1362 const unsigned int batchSize = 2;
1363
1364 unsigned int shape[] = { batchSize, channelCount, height, width };
1365
1366 std::vector<float> input0({
1367 2, 2, 2, 2, 3, 3, 3, 3,
1368 4, 4, 4, 4, 5, 5, 5, 5 });
1369
1370 std::vector<float> input1({
1371 1, 1, 1, 1, 2, 2, 2, 2,
1372 4, 4, 4, 4, 4, 4, 4, 4 });
1373
1374 std::vector<float> output({
1375 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
1376 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
1377
David Beck5cd01f32018-09-12 16:00:08 +01001378
1379 return DivisionTestHelper<float>(workloadFactory,
1380 shape, input0, 1.0f, 0,
1381 shape, input1, 1.0f, 0,
1382 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001383}
1384
1385LayerTestResult<float, 4> DivisionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
1386{
1387 unsigned int shape0[] = { 1, 2, 2, 2 };
1388 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1389
1390 unsigned int shape1[] = { 1, 1, 1, 1 };
1391 std::vector<float> input1({ 2 });
1392
1393 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1394
David Beck5cd01f32018-09-12 16:00:08 +01001395
1396 return DivisionTestHelper<float>(workloadFactory,
1397 shape0, input0, 1.0f, 0,
1398 shape1, input1, 1.0f, 0,
1399 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001400}
1401
1402LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
1403{
1404 unsigned int shape0[] = { 1, 3, 3, 2 };
1405 std::vector<float> input0({
1406 1, 4, 3, 8, 5, 12,
1407 7, 16, 9, 20, 11, 24,
1408 13, 28, 15, 32, 17, 36});
1409
1410 unsigned int shape1[] = { 1, 1, 1, 2 };
1411 std::vector<float> input1({ 1, 2 });
1412
1413 std::vector<float> output({
1414 1, 2, 3, 4, 5, 6,
1415 7, 8, 9, 10, 11, 12,
1416 13, 14, 15, 16, 17, 18});
1417
David Beck5cd01f32018-09-12 16:00:08 +01001418 return DivisionTestHelper<float>(workloadFactory,
1419 shape0, input0, 1.0f, 0,
1420 shape1, input1, 1.0f, 0,
1421 shape0, output, 1.0f, 0);
1422}
1423
1424
1425LayerTestResult<uint8_t,4> DivisionUint8Test(armnn::IWorkloadFactory& workloadFactory)
1426{
1427 const unsigned int width = 2;
1428 const unsigned int height = 2;
1429 const unsigned int channelCount = 2;
1430 const unsigned int batchSize = 2;
1431
1432 unsigned int shape[] = { batchSize, channelCount, height, width };
1433
1434 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
1435 4, 4, 4, 4, 5, 5, 5, 5 });
1436
1437 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
1438 4, 4, 4, 4, 4, 4, 4, 4 });
1439
1440 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
1441 4, 4, 4, 4, 5, 5, 5, 5});
1442
1443
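    // The true quotients are 2.0, 1.5, 1.0 and 1.25; with an output scale of
    // 0.25 and a zero offset they quantize to 8, 6, 4 and 5 respectively.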
1444 return DivisionTestHelper<uint8_t>(workloadFactory,
1445 shape, input0, 1.0f, 0,
1446 shape, input1, 1.0f, 0,
1447 shape, output, 0.25f, 0);
1448}
1449
1450LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
1451{
1452 unsigned int shape0[] = { 1, 2, 2, 2 };
1453 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1454
1455 unsigned int shape1[] = { 1, 1, 1, 1 };
1456 std::vector<uint8_t> input1({ 2 });
1457
1458 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1459
1460 return DivisionTestHelper<uint8_t>(workloadFactory,
1461 shape0, input0, 1.0f, 0,
1462 shape1, input1, 1.0f, 0,
1463 shape0, output, 1.0f, 0);
1464}
1465
1466LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
1467{
1468 unsigned int shape0[] = { 1, 3, 3, 2 };
1469 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
1470 7, 16, 9, 20, 11, 24,
1471 13, 28, 15, 32, 17, 36});
1472
1473 unsigned int shape1[] = { 1, 1, 1, 2 };
1474 std::vector<uint8_t> input1({ 1, 2 });
1475
1476 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
1477 7, 8, 9, 10, 11, 12,
1478 13, 14, 15, 16, 17, 18});
1479
1480 return DivisionTestHelper<uint8_t>(workloadFactory,
1481 shape0, input0, 1.0f, 0,
1482 shape1, input1, 1.0f, 0,
1483 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001484}
1485
1486namespace {
surmeh01bceff2f2018-03-29 16:29:27 +01001487LayerTestResult<float,4> MultiplicationTestHelper(armnn::IWorkloadFactory& workloadFactory,
1488 const unsigned int shape0[4],
1489 const std::vector<float> & values0,
1490 const unsigned int shape1[4],
1491 const std::vector<float> & values1,
1492 const unsigned int outShape[4],
1493 const std::vector<float> & outValues)
telsoa014fcda012018-03-09 14:13:49 +00001494{
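    // Standard single-workload test flow: build the tensor infos, create the
    // input/output handles, fill in the queue descriptor, then allocate,
    // copy the data in, execute, and copy the result back out.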
surmeh01bceff2f2018-03-29 16:29:27 +01001495 const size_t dimensionCount = 4;
1496 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
1497 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
1498 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
telsoa014fcda012018-03-09 14:13:49 +00001499
surmeh01bceff2f2018-03-29 16:29:27 +01001500 auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
1501 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00001502
1503 LayerTestResult<float,4> ret(outputTensorInfo);
1504
1505 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1506 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1507 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1508
1509 armnn::MultiplicationQueueDescriptor data;
1510 armnn::WorkloadInfo info;
1511 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1512 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1513 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1514
1515 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
1516
1517 inputHandle0->Allocate();
1518 inputHandle1->Allocate();
1519 outputHandle->Allocate();
1520
1521 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1522 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1523
1524 workload->Execute();
1525
1526 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1527
surmeh01bceff2f2018-03-29 16:29:27 +01001528 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00001529 return ret;
1530}
surmeh01bceff2f2018-03-29 16:29:27 +01001531} // anonymous namespace
1532
1533
1534LayerTestResult<float,4> MultiplicationTest(armnn::IWorkloadFactory& workloadFactory)
1535{
1536 const unsigned int width = 2;
1537 const unsigned int height = 2;
1538 const unsigned int channelCount = 2;
1539 const unsigned int batchSize = 2;
1540
1541 unsigned int shape[] = { batchSize, channelCount, height, width };
1542
1543 std::vector<float> input0({
1544 1, 1, 1, 1, 2, 2, 2, 2,
1545 3, 3, 3, 3, 4, 4, 4, 4 });
1546
1547 std::vector<float> input1({
1548 2, 2, 2, 2, 3, 3, 3, 3,
1549 4, 4, 4, 4, 5, 5, 5, 5 });
1550
1551 std::vector<float> output({
1552 2, 2, 2, 2, 6, 6, 6, 6,
1553 12, 12, 12, 12, 20, 20, 20, 20 });
1554
1555 return MultiplicationTestHelper(workloadFactory,
1556 shape,
1557 input0,
1558 shape,
1559 input1,
1560 shape,
1561 output);
1562}
1563
1564LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
1565{
1566 unsigned int shape0[] = { 1, 2, 2, 2 };
1567 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
1568
1569 unsigned int shape1[] = { 1, 1, 1, 1 };
1570 std::vector<float> input1({ 2 });
1571
1572 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
1573
1574 return MultiplicationTestHelper(workloadFactory,
1575 shape0,
1576 input0,
1577 shape1,
1578 input1,
1579 shape0,
1580 output);
1581}
1582
1583LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
1584{
1585 unsigned int shape0[] = { 1, 3, 3, 2 };
1586 std::vector<float> input0({
1587 1, 2, 3, 4, 5, 6,
1588 7, 8, 9, 10, 11, 12,
1589 13, 14, 15, 16, 17, 18});
1590
1591 unsigned int shape1[] = { 1, 1, 1, 2 };
1592 std::vector<float> input1({ 1, 2 });
1593
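    // input1 has shape { 1, 1, 1, 2 }, so it is broadcast across all other
    // dimensions: alternate elements of input0 are multiplied by 1 and by 2.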
1594 std::vector<float> output({
1595 1, 4, 3, 8, 5, 12,
1596 7, 16, 9, 20, 11, 24,
1597 13, 28, 15, 32, 17, 36});
1598
1599 return MultiplicationTestHelper(workloadFactory,
1600 shape0,
1601 input0,
1602 shape1,
1603 input1,
1604 shape0,
1605 output);
1606}
telsoa014fcda012018-03-09 14:13:49 +00001607
1608LayerTestResult<float,4> CompareMultiplicationTest(armnn::IWorkloadFactory& workloadFactory,
1609 armnn::IWorkloadFactory& refWorkloadFactory)
1610{
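    // Runs identical multiplication workloads on the factory under test and on
    // a reference factory, returning both outputs so the caller can compare
    // them for consistency.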
1611 const unsigned int width = 16;
1612 const unsigned int height = 32;
1613 const unsigned int channelCount = 2;
1614 const unsigned int batchSize = 5;
1615
1616 armnn::TensorInfo inputTensorInfo0;
1617 armnn::TensorInfo inputTensorInfo1;
1618 armnn::TensorInfo outputTensorInfo;
1619
1620 constexpr unsigned int shape[] = { batchSize, channelCount, height, width };
1621
1622 inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1623 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1624 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1625
1626 LayerTestResult<float,4> comparisonResult(outputTensorInfo);
1627
1628 auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
1629 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);
1630
1631 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1632 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1633 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1634
1635 std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
1636 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
1637 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1638
1639 armnn::MultiplicationQueueDescriptor data;
1640 armnn::WorkloadInfo info;
1641 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1642 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1643 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1644
1645 armnn::MultiplicationQueueDescriptor refData = data;
1646 armnn::WorkloadInfo refInfo = info;
1647 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
1648 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
1649 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1650
1651 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
1652 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);
1653
1654 inputHandle0->Allocate();
1655 inputHandle1->Allocate();
1656 outputHandle->Allocate();
1657 inputHandle0Ref->Allocate();
1658 inputHandle1Ref->Allocate();
1659 outputHandleRef->Allocate();
1660
1661 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1662 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1663 CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
1664 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
1665
1666 workload->Execute();
1667 workloadRef->Execute();
1668
1669 CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
1670 CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
1671
1672 return comparisonResult;
1673}
1674
1675LayerTestResult<float,4> CompareBatchNormTest(armnn::IWorkloadFactory& workloadFactory,
1676 armnn::IWorkloadFactory& refWorkloadFactory)
1677{
1678 const unsigned int width = 2;
1679 const unsigned int height = 3;
1680 const unsigned int channels = 5;
1681 const unsigned int batchSize = 3;
1682
1683 armnn::TensorInfo inputTensorInfo;
1684 armnn::TensorInfo outputTensorInfo;
1685 armnn::TensorInfo tensorInfo;
1686
1687 constexpr unsigned int shape[] = {batchSize, channels, height, width};
1688 constexpr unsigned int tensorShape[] = {channels};
1689
1690 inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1691 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1692 tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);
1693
1694 auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);
1695
1696 auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
1697 auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
1698 auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
1699 auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);
1700
1701 LayerTestResult<float,4> ret(outputTensorInfo);
1702
1703 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
1704 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1705
1706 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
1707 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1708
1709 armnn::BatchNormalizationQueueDescriptor data;
1710 armnn::WorkloadInfo info;
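    // The batch norm parameters (mean, variance, beta and gamma) are
    // per-channel tensors of shape { channels }, held in scoped CPU tensor
    // handles and copied in before the workload is created.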
1711 armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
1712 armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
1713 armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
1714 armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
1715
1716 AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
1717 AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
1718 AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
1719 AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
1720
1721 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1722 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1723 data.m_Mean = &meanTensor;
1724 data.m_Variance = &varianceTensor;
1725 data.m_Beta = &betaTensor;
1726 data.m_Gamma = &gammaTensor;
1727 data.m_Parameters.m_Eps = 0.01f;
1728
1729 armnn::BatchNormalizationQueueDescriptor refData = data;
1730 armnn::WorkloadInfo refInfo = info;
1731 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
1732 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1733
1734 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
1735 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);
1736
1737 inputHandle->Allocate();
1738 outputHandle->Allocate();
1739 inputHandleRef->Allocate();
1740 outputHandleRef->Allocate();
1741
1742 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
1743 CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
1744
1745 workload->Execute();
1746 workloadRef->Execute();
1747
1748 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1749 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
1750
1751 return ret;
1752}
1753
surmeh013537c2c2018-05-18 16:31:43 +01001754template<typename T>
1755void PermuteTensorData(
1756 armnn::IWorkloadFactory& workloadFactory,
1757 const armnn::PermutationVector& mappings,
1758 armnn::TensorInfo & inputTensorInfo,
1759 const T * inputData,
1760 std::vector<T>& outputData)
telsoa014fcda012018-03-09 14:13:49 +00001761{
surmeh013537c2c2018-05-18 16:31:43 +01001762 BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
1763 if (inputData == nullptr)
1764 {
1765 // Nullptr is an error in the test. By returning without doing the permutation
1766 // I expect the caller to fail the test. It still makes sense to report this as
1767 // an assert for Debug builds.
1768 return;
1769 }
telsoa014fcda012018-03-09 14:13:49 +00001770
surmeh013537c2c2018-05-18 16:31:43 +01001771 armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
1772
1773 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
1774 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1775
1776 armnn::PermuteQueueDescriptor queueDescriptor;
1777 queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
1778 armnn::WorkloadInfo workloadInfo;
1779 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
1780 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
1781
1782 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
1783
1784 inputHandle->Allocate();
1785 outputHandle->Allocate();
1786
1787 CopyDataToITensorHandle(inputHandle.get(), inputData);
1788
1789 workload->Execute();
1790
1791 outputData.resize(outputTensorInfo.GetNumElements());
1792 CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
1793 inputTensorInfo = outputTensorInfo;
1794}
1795
1796armnn::OriginsDescriptor CreateMergerDescriptorForConcatenation(
1797 const std::vector<armnn::TensorInfo> & inputTensorInfos,
1798 unsigned int concatDim)
1799{
telsoa014fcda012018-03-09 14:13:49 +00001800 std::vector<armnn::TensorShape> shapes;
1801 shapes.reserve(inputTensorInfos.size());
1802 for (const armnn::TensorInfo& it: inputTensorInfos)
1803 {
1804 shapes.push_back(it.GetShape());
1805 }
surmeh013537c2c2018-05-18 16:31:43 +01001806
1807 return armnn::CreateMergerDescriptorForConcatenation(shapes.begin(),
1808 shapes.end(),
1809 concatDim);
1810}
1811
1812//
1813// Concatenation is only supported for N and C dimensions for NCHW. In case of
telsoa01c577f2c2018-08-31 09:22:23 +01001814// <4 dimensions we need to make sure that the concat dimension is at least
surmeh013537c2c2018-05-18 16:31:43 +01001815// the 3rd slowest iterating one.
1816//
1817
1818bool NeedPermuteForConcat(
1819 const std::vector<armnn::TensorInfo> & inputTensorInfos,
1820 unsigned int concatDim)
1821{
1822 // See note above. Additionally we expect the input shapes to have the
1823 // same number of dimensions.
1824 unsigned int nDimensions = 0;
1825
telsoa01c577f2c2018-08-31 09:22:23 +01001826 // Determine the number of dimensions and sanity-check them
surmeh013537c2c2018-05-18 16:31:43 +01001827 // against test implementation issues.
surmeh013537c2c2018-05-18 16:31:43 +01001828 for (auto && tensorInfo : inputTensorInfos)
1829 {
1830 if (!nDimensions)
1831 {
1832 nDimensions = tensorInfo.GetShape().GetNumDimensions();
1833 }
1834 else
1835 {
1836 BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
1837 "Input shapes must have the same number of dimensions");
1838 }
1839 }
1840
1841 return (nDimensions - concatDim) < 3;
1842}
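// Example (illustrative): concatenating 2D tensors along dimension 1 gives
// nDimensions - concatDim = 2 - 1 = 1 < 3, so a permute is required. For 4D
// tensors concatenated along N or C the difference is 4 or 3, so no permute
// is needed.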
1843
1844armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
1845{
1846 unsigned int numDims = inputShape.GetNumDimensions();
1847 if (numDims >= 3)
1848 {
1849 // Nothing to do if the inputShape has at least 3 dimensions.
1850 return inputShape;
1851 }
1852
1853 std::vector<unsigned int> newDims(size_t(3), 1u);
1854 unsigned int expandedBy = 3 - numDims;
1855 for (unsigned int i=0; i<numDims; ++i)
1856 {
1857 newDims[expandedBy+i] = inputShape[i];
1858 }
1859 return armnn::TensorShape(3u, &newDims[0]);
1860}
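// Example (illustrative): a 1D shape { 5 } becomes { 1, 1, 5 } and a 2D shape
// { 2, 3 } becomes { 1, 2, 3 }; the original dimensions stay the fastest
// iterating ones.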
1861
1862void Generate3dPermuteVectorForConcat(
1863 unsigned int numDimensions,
1864 unsigned int & concatDim,
1865 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
1866{
1867 BOOST_ASSERT_MSG(numDimensions <= 3,
1868 "Only dimensions 1,2 and 3 are supported by this helper");
1869
1870 unsigned int expandedBy = 3 - numDimensions;
1871 unsigned int expandedConcatAxis = concatDim + expandedBy;
1872
1873 if (expandedConcatAxis == 2)
1874 {
1875 concatDim = 0;
1876 armnn::PermutationVector forwardPermutation({1, 2, 0});
1877 armnn::PermutationVector reversePermutation({2, 0, 1});
1878 permutations = std::make_pair(forwardPermutation, reversePermutation);
1879 }
1880 else if (expandedConcatAxis == 1)
1881 {
1882 concatDim = 0;
1883 armnn::PermutationVector forwardPermutation({2, 0, 1});
1884 armnn::PermutationVector reversePermutation({1, 2, 0});
1885 permutations = std::make_pair(forwardPermutation, reversePermutation);
1886 }
1887 else
1888 {
1889 BOOST_ASSERT(expandedConcatAxis == 0);
1890 concatDim = 0;
1891 }
1892}
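// Example (illustrative): for a 2D concatenation along dimension 1, the shape
// is first expanded to 3D, so the expanded concat axis is 2. The forward
// permutation { 1, 2, 0 } then moves that axis to position 0, where the
// merger workload supports concatenation.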
1893
1894//
1895// Permute the input tensors so we can do a supported concatenation.
1896// It also treats tensors with fewer than 3 dimensions as 3d by adding dummy
1897// dimensions of size 1 at the front. Finally, it reports what the output
1898// shape of the permuted, concatenated tensor is going to be.
1899//
1900template <typename T>
1901void PermuteInputsForConcat(
1902 armnn::IWorkloadFactory& workloadFactory,
1903 std::vector<armnn::TensorInfo> & inputTensorInfos,
1904 std::vector<T *> & inputData,
1905 std::vector<std::vector<T>> & inputDataStorage,
1906 armnn::PermutationVector & permuteVector,
1907 unsigned int & concatDim,
1908 armnn::TensorInfo & outputTensorInfo)
1909{
1910 BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
1911 "Expecting more than one tensor to be concatenated here");
1912
1913 unsigned int numDims = 0;
1914 unsigned int nthInput = 0;
1915 const armnn::PermutationVector identity({0, 1, 2});
1916
1917 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
1918 std::make_pair(identity, identity);
1919
1920 inputDataStorage.resize(inputData.size());
1921
1922 for (auto && tensorInfo : inputTensorInfos)
1923 {
1924 if (numDims == 0)
1925 {
1926 numDims = tensorInfo.GetShape().GetNumDimensions();
1927 Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);
telsoa01c577f2c2018-08-31 09:22:23 +01001928 // Store the reverse permutation.
surmeh013537c2c2018-05-18 16:31:43 +01001929 permuteVector = permutations.second;
1930 BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
1931 "Test logic error, we don't need permutation, so we shouldn't arrive here");
1932 }
1933 else
1934 {
1935 BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
1936 "All inputs must have the same number of dimensions");
1937 }
1938
1939 armnn::TensorInfo newTensorInfo = tensorInfo;
1940 newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));
1941
1942 PermuteTensorData<T>(workloadFactory,
1943 permutations.first,
1944 newTensorInfo,
1945 inputData[nthInput],
1946 inputDataStorage[nthInput]);
1947
1948 inputData[nthInput] = inputDataStorage[nthInput].data();
1949 inputTensorInfos[nthInput] = newTensorInfo;
1950
1951 ++nthInput;
1952 }
1953
1954 outputTensorInfo.SetShape(
1955 armnnUtils::Permuted(
1956 ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
1957 permutations.first));
1958}
1959
1960
1961//
1962// This is the counterpart of PermuteInputsForConcat(...): it permutes back
telsoa01c577f2c2018-08-31 09:22:23 +01001963// the output of the concatenation so we can check it against an expected
surmeh013537c2c2018-05-18 16:31:43 +01001964// output.
1965//
1966template <typename T>
1967void PermuteOutputForConcat(
1968 armnn::IWorkloadFactory& workloadFactory,
1969 const armnn::TensorInfo & tensorInfo,
1970 const armnn::PermutationVector & permuteVector,
1971 std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
1972 T * data)
1973{
1974 BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
1975 if (data == nullptr)
1976 {
1977 // Nullptr is an error in the test. By returning without doing the permutation
1978 // I expect the caller to fail the test. It still makes sense to report this as
1979 // an assert for Debug builds.
1980 return;
1981 }
1982
1983 armnn::TensorInfo resultTensorInfo = tensorInfo;
1984 std::vector<T> inputData(tensorInfo.GetNumElements());
1985 std::vector<T> outputData;
1986
1987 CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
1988
1989 PermuteTensorData<T>(workloadFactory,
1990 permuteVector,
1991 resultTensorInfo,
1992 &inputData[0],
1993 outputData);
1994
1995 ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
1996}
1997
1998template <typename T>
1999void Concatenate(armnn::IWorkloadFactory& workloadFactory,
2000 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
2001 std::initializer_list<T *> inputsOrig,
2002 const armnn::TensorInfo& outputTensorInfoOrig,
2003 T * output,
2004 unsigned int concatDim)
2005{
2006 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
2007 if (output == nullptr)
2008 {
2009 // Nullptr is an error in the test. By returning without doing the concatenation
2010 // I expect the caller to fail the test. It still makes sense to report this as
2011 // an assert for Debug builds.
2012 return;
2013 }
2014
2015 armnn::MergerQueueDescriptor queueDescriptor;
2016
telsoa01c577f2c2018-08-31 09:22:23 +01002017 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01002018 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
2019 std::vector<T *> inputs = inputsOrig;
2020 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
2021
2022 armnn::PermutationVector permuteVector{0, 1, 2};
2023
telsoa01c577f2c2018-08-31 09:22:23 +01002024 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01002025 std::vector<std::vector<T>> tmpInputDataStorage;
2026
2027 const size_t inputCount = inputTensorInfos.size();
2028
2029 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
2030
2031 if (needPermuteForConcat)
2032 {
2033 //
2034 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01002035 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01002036 //
2037 PermuteInputsForConcat<T>(workloadFactory,
2038 inputTensorInfos,
2039 inputs,
2040 tmpInputDataStorage,
2041 permuteVector,
2042 concatDim,
2043 outputTensorInfo);
2044 }
2045
2046 armnn::OriginsDescriptor viewsDescriptor = CreateMergerDescriptorForConcatenation(inputTensorInfos, concatDim);
telsoa014fcda012018-03-09 14:13:49 +00002047
2048 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
2049 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
2050 {
2051 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
2052 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
2053 }
2054
telsoa014fcda012018-03-09 14:13:49 +00002055 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2056
2057 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
2058 inputHandles.reserve(inputCount);
2059
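    // Where the backend supports sub-tensors, create each input as a view into
    // the output tensor at its view origin, so the merger can run without
    // extra copies; otherwise fall back to standalone input tensor handles.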
2060 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2061 for (unsigned int i = 0; i < inputCount; ++i)
2062 {
surmeh013537c2c2018-05-18 16:31:43 +01002063 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
telsoa014fcda012018-03-09 14:13:49 +00002064
2065 std::unique_ptr<armnn::ITensorHandle> inputHandle = subTensorsSupported ?
2066 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo.GetShape(),
2067 queueDescriptor.m_ViewOrigins[i].m_Origin.data())
2068 : workloadFactory.CreateTensorHandle(inputTensorInfo);
2069
2070 inputHandles.emplace_back(std::move(inputHandle));
2071 }
2072
2073 armnn::WorkloadInfo workloadInfo;
2074
2075 for (unsigned int i = 0; i < inputCount; ++i)
2076 {
surmeh013537c2c2018-05-18 16:31:43 +01002077 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00002078 }
2079
2080 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
2081
2082 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(queueDescriptor, workloadInfo);
2083
2084 for (auto& inputHandle : inputHandles)
2085 {
2086 inputHandle->Allocate();
2087 }
2088
2089 outputHandle->Allocate();
2090
2091 unsigned int nextInputId = 0;
2092 for (auto& inputHandle : inputHandles)
2093 {
surmeh013537c2c2018-05-18 16:31:43 +01002094 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
2095 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00002096 }
2097
2098 workload->Execute();
2099
surmeh013537c2c2018-05-18 16:31:43 +01002100 if (needPermuteForConcat)
2101 {
2102 PermuteOutputForConcat<T>(workloadFactory,
2103 outputTensorInfo,
2104 permuteVector,
2105 std::move(outputHandle),
2106 output);
2107 }
2108 else
2109 {
2110 CopyDataFromITensorHandle(output, outputHandle.get());
2111 }
telsoa014fcda012018-03-09 14:13:49 +00002112}
2113
2114template <typename T>
2115LayerTestResult<T, 1> Concatenation1dTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
2116{
2117 armnn::TensorInfo inputTensorInfo({ 3 }, armnn::GetDataType<T>());
2118
2119 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
2120 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
2121 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
2122
2123 armnn::TensorInfo outputTensorInfo({ 9 }, armnn::GetDataType<T>());
2124
2125 LayerTestResult<T, 1> result(outputTensorInfo);
2126
2127 std::vector<T> output;
2128 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002129 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002130 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2131 { input0.data(), input1.data(), input2.data() },
2132 outputTensorInfo,
2133 output.data(),
2134 0);
2135
2136 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
2137 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2138 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
2139 }));
2140
2141 return result;
2142}
2143
2144LayerTestResult<float, 1> Concatenation1dTest(armnn::IWorkloadFactory& workloadFactory)
2145{
2146 return Concatenation1dTestImpl<float>(workloadFactory, 0.0f, 0);
2147}
2148
2149template <typename T>
2150LayerTestResult<T, 2> Concatenation2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
2151 const armnn::TensorInfo& outputTensorInfo,
2152 unsigned int dimension,
2153 const float qScale,
2154 const int32_t qOffset)
2155{
2156 armnn::TensorInfo inputTensorInfo({ 2, 3 }, armnn::GetDataType<T>());
2157
2158 auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2159 // Batch 0
2160 1.0f, 2.0f, 3.0f,
2161
2162 // Batch 1
2163 10.0f, 11.0f, 12.0f,
2164 }));
2165
2166 auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2167 // Batch 0
2168 4.0f, 5.0f, 6.0f,
2169
2170 // Batch 1
2171 13.0f, 14.0f, 15.0f,
2172 }));
2173
2174 auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2175 // Batch 0
2176 7.0f, 8.0f, 9.0f,
2177
2178 // Batch 1
2179 16.0f, 17.0f, 18.0f,
2180 }));
2181
2182 LayerTestResult<T, 2> result(outputTensorInfo);
2183
2184 std::vector<T> output;
2185 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002186 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002187 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2188 { input0.data(), input1.data(), input2.data() },
2189 outputTensorInfo,
2190 output.data(),
2191 dimension);
2192
2193 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
2194 return result;
2195}
2196
2197template <typename T>
2198LayerTestResult<T, 2> Concatenation2dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory,
2199 float qScale, int32_t qOffset)
2200{
2201 armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
2202
2203 LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 0, qScale, qOffset);
2204 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2205 // Batch 0
2206 1.0f, 2.0f, 3.0f,
2207
2208 // Batch 1
2209 10.0f, 11.0f, 12.0f,
2210
2211 // Batch 2
2212 4.0f, 5.0f, 6.0f,
2213
2214 // Batch 3
2215 13.0f, 14.0f, 15.0f,
2216
2217 // Batch 4
2218 7.0f, 8.0f, 9.0f,
2219
2220 // Batch 5
2221 16.0f, 17.0f, 18.0f,
2222 }));
2223
2224 return result;
2225}
2226
2227LayerTestResult<float, 2> Concatenation2dDim0Test(armnn::IWorkloadFactory& workloadFactory)
2228{
2229 return Concatenation2dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
2230}
2231
2232template <typename T>
2233LayerTestResult<T, 2> Concatenation2dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
2234 float qScale, int32_t qOffset)
2235{
2236 armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
2237
2238 LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
2239 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2240 // Batch 0
2241 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
2242
2243 // Batch 1
2244 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
2245 }));
2246
2247 return result;
2248}
2249
2250LayerTestResult<float, 2> Concatenation2dDim1Test(armnn::IWorkloadFactory& workloadFactory)
2251{
2252 return Concatenation2dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
2253}
2254
2255template <typename T>
2256LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2257 int32_t qOffset)
2258{
2259 armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
2260 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2261 // Batch 0
2262 1.0f, 2.0f, 3.0f,
2263
2264 // Batch 1
2265 10.0f, 11.0f, 12.0f,
2266 }));
2267
2268 armnn::TensorInfo input1TensorInfo({ 3, 3 }, armnn::GetDataType<T>());
2269 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2270 // Batch 0
2271 4.0f, 5.0f, 6.0f,
2272
2273 // Batch 1
2274 13.0f, 14.0f, 15.0f,
2275
2276 // Batch 0
2277 7.0f, 8.0f, 9.0f,
2278 }));
2279
2280 armnn::TensorInfo input2TensorInfo({ 1, 3 }, armnn::GetDataType<T>());
2281 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2282 // Batch 1
2283 16.0f, 17.0f, 18.0f,
2284 }));
2285
2286 armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
2287 LayerTestResult<T, 2> result(outputTensorInfo);
2288
2289 std::vector<T> output;
2290 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002291 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002292 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2293 { input0.data(), input1.data(), input2.data() },
2294 outputTensorInfo,
2295 output.data(),
2296 0);
2297
2298 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
2299 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2300 // Batch 0
2301 1.0f, 2.0f, 3.0f,
2302
2303 // Batch 1
2304 10.0f, 11.0f, 12.0f,
2305
2306 // Batch 2
2307 4.0f, 5.0f, 6.0f,
2308
2309 // Batch 3
2310 13.0f, 14.0f, 15.0f,
2311
2312 // Batch 4
2313 7.0f, 8.0f, 9.0f,
2314
2315 // Batch 5
2316 16.0f, 17.0f, 18.0f,
2317 }));
2318
2319 return result;
2320}
2321
2322LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2323{
2324 return Concatenation2dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2325}
2326
2327template <typename T>
2328LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2329 int32_t qOffset)
2330{
2331 armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
2332 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2333 // Batch 0
2334 1.0f, 2.0f, 3.0f,
2335
2336 // Batch 1
2337 10.0f, 11.0f, 12.0f,
2338 }));
2339
2340 armnn::TensorInfo input1TensorInfo({ 2, 5 }, armnn::GetDataType<T>());
2341 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2342 // Batch 0
2343 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
2344
2345 // Batch 1
2346 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
2347 }));
2348
2349 armnn::TensorInfo input2TensorInfo({ 2, 1 }, armnn::GetDataType<T>());
2350 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2351 // Batch 0
2352 9.0f,
2353
2354 // Batch 1
2355 18.0f
2356 }));
2357
2358 armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
2359 LayerTestResult<T, 2> result(outputTensorInfo);
2360
2361 std::vector<T> output;
2362 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002363 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002364 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2365 { input0.data(), input1.data(), input2.data() },
2366 outputTensorInfo,
2367 output.data(),
2368 1);
2369
2370 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
2371 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2372 // Batch 0
2373 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
2374
2375 // Batch 1
2376 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
2377 }));
2378
2379 return result;
2380}
2381
2382LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2383{
2384 return Concatenation2dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2385}
2386
2387template <typename T>
2388LayerTestResult<T, 3> Concatenation3dTestImpl(armnn::IWorkloadFactory& workloadFactory,
2389 const armnn::TensorInfo& outputTensorInfo,
2390 unsigned int dimension,
2391 float qScale,
2392 int32_t qOffset)
2393{
2394 armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2395
2396 auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2397 // Batch 0, Channel 0
2398 1.0f, 2.0f,
2399
2400 // Batch 0, Channel 1
2401 3.0f, 4.0f,
2402
2403 // Batch 0, Channel 2
2404 5.0f, 6.0f,
2405
2406 // Batch 1, Channel 0
2407 19.0f, 20.0f,
2408
2409 // Batch 1, Channel 1
2410 21.0f, 22.0f,
2411
2412 // Batch 1, Channel 2
2413 23.0f, 24.0f
2414 }));
2415
2416 auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2417 // Batch 0, Channel 0
2418 7.0f, 8.0f,
2419
2420 // Batch 0, Channel 1
2421 9.0f, 10.0f,
2422
2423 // Batch 0, Channel 2
2424 11.0f, 12.0f,
2425
2426 // Batch 1, Channel 0
2427 25.0f, 26.0f,
2428
2429 // Batch 1, Channel 1
2430 27.0f, 28.0f,
2431
2432 // Batch 1, Channel 2
2433 29.0f, 30.0f
2434 }));
2435
2436 auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2437 // Batch 0, Channel 0
2438 13.0f, 14.0f,
2439
2440 // Batch 0, Channel 1
2441 15.0f, 16.0f,
2442
2443 // Batch 0, Channel 2
2444 17.0f, 18.0f,
2445
2446 // Batch 1, Channel 0
2447 31.0f, 32.0f,
2448
2449 // Batch 1, Channel 1
2450 33.0f, 34.0f,
2451
2452 // Batch 1, Channel 2
2453 35.0f, 36.0f
2454 }));
2455
2456 LayerTestResult<T, 3> result(outputTensorInfo);
2457
2458 std::vector<T> output;
2459 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002460 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002461 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2462 { input0.data(), input1.data(), input2.data() },
2463 outputTensorInfo,
2464 output.data(),
2465 dimension);
2466
2467 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
2468 return result;
2469}
2470
2471template <typename T>
2472LayerTestResult<T, 3> Concatenation3dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2473 int32_t qOffset)
2474{
2475 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
2476
2477 LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 0,
2478 qScale, qOffset);
2479 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2480 // Batch 0, Channel 0
2481 1.0f, 2.0f,
2482
2483 // Batch 0, Channel 1
2484 3.0f, 4.0f,
2485
2486 // Batch 0, Channel 2
2487 5.0f, 6.0f,
2488
2489 // Batch 1, Channel 0
2490 19.0f, 20.0f,
2491
2492 // Batch 1, Channel 1
2493 21.0f, 22.0f,
2494
2495 // Batch 1, Channel 2
2496 23.0f, 24.0f,
2497
2498 // Batch 2, Channel 0
2499 7.0f, 8.0f,
2500
2501 // Batch 2, Channel 1
2502 9.0f, 10.0f,
2503
2504 // Batch 2, Channel 2
2505 11.0f, 12.0f,
2506
2507 // Batch 3, Channel 0
2508 25.0f, 26.0f,
2509
2510 // Batch 3, Channel 1
2511 27.0f, 28.0f,
2512
2513 // Batch 3, Channel 2
2514 29.0f, 30.0f,
2515
2516 // Batch 4, Channel 0
2517 13.0f, 14.0f,
2518
2519 // Batch 4, Channel 1
2520 15.0f, 16.0f,
2521
2522 // Batch 4, Channel 2
2523 17.0f, 18.0f,
2524
2525 // Batch 5, Channel 0
2526 31.0f, 32.0f,
2527
2528 // Batch 5, Channel 1
2529 33.0f, 34.0f,
2530
2531 // Batch 5, Channel 2
2532 35.0f, 36.0f
2533 }));
2534 return result;
2535}
2536
2537LayerTestResult<float, 3> Concatenation3dDim0Test(armnn::IWorkloadFactory& workloadFactory)
2538{
2539 return Concatenation3dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
2540}
2541
2542template <typename T>
2543LayerTestResult<T, 3> Concatenation3dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
2544 float qScale, int32_t qOffset)
2545{
2546 armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, armnn::GetDataType<T>());
2547
2548 LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
2549 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2550 // Batch 0, Channel 0
2551 1.0f, 2.0f,
2552
2553 // Batch 0, Channel 1
2554 3.0f, 4.0f,
2555
2556 // Batch 0, Channel 2
2557 5.0f, 6.0f,
2558
2559 // Batch 0, Channel 3
2560 7.0f, 8.0f,
2561
2562 // Batch 0, Channel 4
2563 9.0f, 10.0f,
2564
2565 // Batch 0, Channel 5
2566 11.0f, 12.0f,
2567
2568 // Batch 0, Channel 6
2569 13.0f, 14.0f,
2570
2571 // Batch 0, Channel 7
2572 15.0f, 16.0f,
2573
2574 // Batch 0, Channel 8
2575 17.0f, 18.0f,
2576
2577 // Batch 1, Channel 0
2578 19.0f, 20.0f,
2579
2580 // Batch 1, Channel 1
2581 21.0f, 22.0f,
2582
2583 // Batch 1, Channel 2
2584 23.0f, 24.0f,
2585
2586 // Batch 1, Channel 3
2587 25.0f, 26.0f,
2588
2589 // Batch 1, Channel 4
2590 27.0f, 28.0f,
2591
2592 // Batch 1, Channel 5
2593 29.0f, 30.0f,
2594
2595 // Batch 1, Channel 6
2596 31.0f, 32.0f,
2597
2598 // Batch 1, Channel 7
2599 33.0f, 34.0f,
2600
2601 // Batch 1, Channel 8
2602 35.0f, 36.0f
2603 }));
2604
2605 return result;
2606}
2607
2608LayerTestResult<float, 3> Concatenation3dDim1Test(armnn::IWorkloadFactory& workloadFactory)
2609{
2610 return Concatenation3dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
2611}
2612
2613template <typename T>
2614LayerTestResult<T, 3> Concatenation3dDim2TestImpl(armnn::IWorkloadFactory& workloadFactory,
2615 float qScale, int32_t qOffset)
2616{
2617 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
2618
2619 LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 2, qScale, qOffset);
2620 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2621 // Batch 0, Channel 0
2622 1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
2623
2624 // Batch 0, Channel 1
2625 3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
2626
2627 // Batch 0, Channel 2
2628 5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
2629
2630 // Batch 1, Channel 0
2631 19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
2632
2633 // Batch 1, Channel 1
2634 21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
2635
2636 // Batch 1, Channel 2
2637 23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
2638 }));
2639
2640 return result;
2641}
2642
2643LayerTestResult<float, 3> Concatenation3dDim2Test(armnn::IWorkloadFactory& workloadFactory)
2644{
2645 return Concatenation3dDim2TestImpl<float>(workloadFactory, 0.0f, 0);
2646}
2647
2648template <typename T>
2649LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2650 int32_t qOffset)
2651{
2652 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2653 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2654 // Batch 0, Channel 0
2655 1.0f, 2.0f,
2656
2657 // Batch 0, Channel 1
2658 3.0f, 4.0f,
2659
2660 // Batch 0, Channel 2
2661 5.0f, 6.0f,
2662
2663 // Batch 1, Channel 0
2664 19.0f, 20.0f,
2665
2666 // Batch 1, Channel 1
2667 21.0f, 22.0f,
2668
2669 // Batch 1, Channel 2
2670 23.0f, 24.0f
2671 }));
2672
2673 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, armnn::GetDataType<T>());
2674 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2675 // Batch 0, Channel 0
2676 7.0f, 8.0f,
2677
2678 // Batch 0, Channel 1
2679 9.0f, 10.0f,
2680
2681 // Batch 0, Channel 2
2682 11.0f, 12.0f,
2683 }));
2684
2685 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, armnn::GetDataType<T>());
2686 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2687 // Batch 0, Channel 0
2688 25.0f, 26.0f,
2689
2690 // Batch 0, Channel 1
2691 27.0f, 28.0f,
2692
2693 // Batch 0, Channel 2
2694 29.0f, 30.0f,
2695
2696 // Batch 1, Channel 0
2697 13.0f, 14.0f,
2698
2699 // Batch 1, Channel 1
2700 15.0f, 16.0f,
2701
2702 // Batch 1, Channel 2
2703 17.0f, 18.0f,
2704
2705 // Batch 2, Channel 0
2706 31.0f, 32.0f,
2707
2708 // Batch 2, Channel 1
2709 33.0f, 34.0f,
2710
2711 // Batch 2, Channel 2
2712 35.0f, 36.0f
2713 }));
2714
2715 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
2716 LayerTestResult<T, 3> result(outputTensorInfo);
2717
2718 std::vector<T> output;
2719 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002720 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002721 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2722 { input0.data(), input1.data(), input2.data() },
2723 outputTensorInfo,
2724 output.data(),
2725 0);
2726
2727 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
2728 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2729 // Batch 0, Channel 0
2730 1.0f, 2.0f,
2731
2732 // Batch 0, Channel 1
2733 3.0f, 4.0f,
2734
2735 // Batch 0, Channel 2
2736 5.0f, 6.0f,
2737
2738 // Batch 1, Channel 0
2739 19.0f, 20.0f,
2740
2741 // Batch 1, Channel 1
2742 21.0f, 22.0f,
2743
2744 // Batch 1, Channel 2
2745 23.0f, 24.0f,
2746
2747 // Batch 2, Channel 0
2748 7.0f, 8.0f,
2749
2750 // Batch 2, Channel 1
2751 9.0f, 10.0f,
2752
2753 // Batch 2, Channel 2
2754 11.0f, 12.0f,
2755
2756 // Batch 3, Channel 0
2757 25.0f, 26.0f,
2758
2759 // Batch 3, Channel 1
2760 27.0f, 28.0f,
2761
2762 // Batch 3, Channel 2
2763 29.0f, 30.0f,
2764
2765 // Batch 4, Channel 0
2766 13.0f, 14.0f,
2767
2768 // Batch 4, Channel 1
2769 15.0f, 16.0f,
2770
2771 // Batch 4, Channel 2
2772 17.0f, 18.0f,
2773
2774 // Batch 5, Channel 0
2775 31.0f, 32.0f,
2776
2777 // Batch 5, Channel 1
2778 33.0f, 34.0f,
2779
2780 // Batch 5, Channel 2
2781 35.0f, 36.0f
2782 }));
2783
2784 return result;
2785}
2786
2787LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2788{
2789 return Concatenation3dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2790}
2791
2792template <typename T>
2793LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2794 int32_t qOffset)
2795{
2796 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2797 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2798 // Batch 0, Channel 0
2799 1.0f, 2.0f,
2800
2801 // Batch 0, Channel 1
2802 3.0f, 4.0f,
2803
2804 // Batch 0, Channel 2
2805 5.0f, 6.0f,
2806
2807 // Batch 1, Channel 0
2808 19.0f, 20.0f,
2809
2810 // Batch 1, Channel 1
2811 21.0f, 22.0f,
2812
2813 // Batch 1, Channel 2
2814 23.0f, 24.0f
2815 }));
2816
2817 armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, armnn::GetDataType<T>());
2818 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2819 // Batch 0, Channel 0
2820 7.0f, 8.0f,
2821
2822 // Batch 0, Channel 1
2823 9.0f, 10.0f,
2824
2825 // Batch 0, Channel 2
2826 11.0f, 12.0f,
2827
2828 // Batch 0, Channel 3
2829 25.0f, 26.0f,
2830
2831 // Batch 1, Channel 0
2832 27.0f, 28.0f,
2833
2834 // Batch 1, Channel 1
2835 29.0f, 30.0f,
2836
2837 // Batch 1, Channel 2
2838 13.0f, 14.0f,
2839
2840 // Batch 1, Channel 3
2841 15.0f, 16.0f,
2842 }));
2843
2844 armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, armnn::GetDataType<T>());
2845 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2846 // Batch 0, Channel 0
2847 17.0f, 18.0f,
2848
2849 // Batch 1, Channel 0
2850 31.0f, 32.0f,
2851 }));
2852
2853 armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, armnn::GetDataType<T>());
2854 LayerTestResult<T, 3> result(outputTensorInfo);
2855
2856 std::vector<T> output;
2857 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002858 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002859 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2860 { input0.data(), input1.data(), input2.data() },
2861 outputTensorInfo,
2862 output.data(),
2863 1);
2864
2865 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
2866 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2867 // Batch 0, Channel 0
2868 1.0f, 2.0f,
2869
2870 // Batch 0, Channel 1
2871 3.0f, 4.0f,
2872
2873 // Batch 0, Channel 2
2874 5.0f, 6.0f,
2875
2876 // Batch 0, Channel 3
2877 7.0f, 8.0f,
2878
2879 // Batch 0, Channel 4
2880 9.0f, 10.0f,
2881
2882 // Batch 0, Channel 5
2883 11.0f, 12.0f,
2884
2885 // Batch 0, Channel 6
2886 25.0f, 26.0f,
2887
2888 // Batch 0, Channel 7
2889 17.0f, 18.0f,
2890
2891 // Batch 1, Channel 0
2892 19.0f, 20.0f,
2893
2894 // Batch 1, Channel 1
2895 21.0f, 22.0f,
2896
2897 // Batch 1, Channel 2
2898 23.0f, 24.0f,
2899
2900 // Batch 1, Channel 3
2901 27.0f, 28.0f,
2902
2903 // Batch 1, Channel 4
2904 29.0f, 30.0f,
2905
2906 // Batch 1, Channel 5
2907 13.0f, 14.0f,
2908
2909 // Batch 1, Channel 6
2910 15.0f, 16.0f,
2911
2912 // Batch 1, Channel 7
2913 31.0f, 32.0f,
2914 }));
2915
2916 return result;
2917}
2918
2919LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2920{
2921 return Concatenation3dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2922}
2923
2924template <typename T>
2925LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2926 int32_t qOffset)
2927{
2928 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2929 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2930 // Batch 0, Channel 0
2931 1.0f, 2.0f,
2932
2933 // Batch 0, Channel 1
2934 3.0f, 4.0f,
2935
2936 // Batch 0, Channel 2
2937 5.0f, 6.0f,
2938
2939 // Batch 1, Channel 0
2940 19.0f, 20.0f,
2941
2942 // Batch 1, Channel 1
2943 21.0f, 22.0f,
2944
2945 // Batch 1, Channel 2
2946 23.0f, 24.0f
2947 }));
2948
2949 armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, armnn::GetDataType<T>());
2950 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2951 // Batch 0, Channel 0
2952 7.0f,
2953
2954 // Batch 0, Channel 1
2955 9.0f,
2956
2957 // Batch 0, Channel 2
2958 11.0f,
2959
2960 // Batch 1, Channel 0
2961 25.0f,
2962
2963 // Batch 1, Channel 1
2964 27.0f,
2965
2966 // Batch 1, Channel 2
2967 29.0f
2968 }));
2969
2970 armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, armnn::GetDataType<T>());
2971 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2972 // Batch 0, Channel 0
2973 13.0f, 14.0f, 50.0f,
2974
2975 // Batch 0, Channel 1
2976 15.0f, 16.0f, 51.0f,
2977
2978 // Batch 0, Channel 2
2979 17.0f, 18.0f, 52.0f,
2980
2981 // Batch 1, Channel 0
2982 31.0f, 32.0f, 53.0f,
2983
2984 // Batch 1, Channel 1
2985 33.0f, 34.0f, 54.0f,
2986
2987 // Batch 1, Channel 2
2988 35.0f, 36.0f, 55.0f,
2989 }));
2990
2991 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
2992 LayerTestResult<T, 3> result(outputTensorInfo);
2993
2994 std::vector<T> output;
2995 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002996 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002997 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2998 { input0.data(), input1.data(), input2.data() },
2999 outputTensorInfo,
3000 output.data(),
3001 2);
3002
3003 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
3004 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3005 // Batch 0, Channel 0
3006 1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
3007
3008 // Batch 0, Channel 1
3009 3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
3010
3011 // Batch 0, Channel 2
3012 5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
3013
3014 // Batch 1, Channel 0
3015 19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
3016
3017 // Batch 1, Channel 1
3018 21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
3019
3020 // Batch 1, Channel 2
3021 23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
3022 }));
3023
3024 return result;
3025}
3026
3027LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
3028{
3029 return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
3030}
3031
James Conroy6b965822018-11-01 11:33:09 +00003032LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory,
3033 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00003034{
James Conroy6b965822018-11-01 11:33:09 +00003035 const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
3036 const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
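    // Input and output have identical shapes, so the resize is a no-op and the
    // expected output is simply the input data.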
telsoa014fcda012018-03-09 14:13:49 +00003037
James Conroy6b965822018-11-01 11:33:09 +00003038 std::vector<float> inputData({
3039 1.0f, 2.0f, 3.0f, 4.0f,
3040 2.0f, 3.0f, 4.0f, 5.0f,
3041 3.0f, 4.0f, 5.0f, 6.0f,
3042 4.0f, 5.0f, 6.0f, 7.0f,
3043
telsoa014fcda012018-03-09 14:13:49 +00003044 1.0f, 2.0f, 3.0f, 4.0f,
3045 2.0f, 3.0f, 4.0f, 5.0f,
3046 3.0f, 4.0f, 5.0f, 6.0f,
3047 4.0f, 5.0f, 6.0f, 7.0f
James Conroy6b965822018-11-01 11:33:09 +00003048 });
3049
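    // Each element of the permutation vector gives the destination position of
    // the corresponding source dimension: N stays at 0, C moves to 3, H to 1
    // and W to 2, i.e. NCHW data is rearranged into NHWC.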
3050 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
3051 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
3052 {
3053 std::vector<float> tmp(inputData.size());
3054 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
3055 inputData = tmp;
3056 }
3057
3058 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00003059
3060 LayerTestResult<float, 4> result(outputTensorInfo);
3061 result.outputExpected = input;
3062
3063 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3064 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3065
3066 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003067 descriptor.m_Parameters.m_DataLayout = dataLayout;
3068 armnn::WorkloadInfo info;
3069 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3070 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3071
3072 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3073
3074 inputHandle->Allocate();
3075 outputHandle->Allocate();
3076 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3077
James Conroy074f3712018-10-03 09:32:03 +01003078 workload->Execute();
3079
3080 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3081 return result;
3082}
3083
James Conroy6b965822018-11-01 11:33:09 +00003084LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory,
3085 const armnn::DataLayoutIndexed& dataLayout)
James Conroy074f3712018-10-03 09:32:03 +01003086{
James Conroy6b965822018-11-01 11:33:09 +00003087 const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
3088 const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 1, 1, dataLayout);
James Conroy074f3712018-10-03 09:32:03 +01003089
James Conroy6b965822018-11-01 11:33:09 +00003090 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01003091 1.0f, 255.0f,
James Conroy6b965822018-11-01 11:33:09 +00003092 200.0f, 250.0f,
3093
3094 250.0f, 200.0f,
3095 250.0f, 1.0f
3096 });
James Conroy074f3712018-10-03 09:32:03 +01003097
3098 // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
3099 // then figures out the interpolants and weights. Note this is different to projecting the centre of the
James Conroy6b965822018-11-01 11:33:09 +00003100    // output texel. Thus, for an input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
3101 // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
3102 // which we would expect if projecting the centre).
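    // Concretely: scaleH = inputH / outputH = 2 and scaleW = inputW / outputW = 2, so output texel (0,0)
    // projects to input coordinate (0,0), picking up 1.0f from channel 0 and 250.0f from channel 1,
    // with no interpolation at all.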
3103
3104 std::vector<float> outputData({
3105 1.0f,
3106
3107 250.0f
3108 });
3109
3110 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
3111 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
3112 {
3113 std::vector<float> tmp(inputData.size());
3114 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
3115 inputData = tmp;
3116
3117 std::vector<float> tmp1(outputData.size());
3118 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
3119 outputData = tmp1;
3120 }
3121
3122 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
3123
James Conroy074f3712018-10-03 09:32:03 +01003124 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00003125 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
James Conroy074f3712018-10-03 09:32:03 +01003126
3127 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3128 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3129
3130 armnn::ResizeBilinearQueueDescriptor descriptor;
3131 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003132 armnn::WorkloadInfo info;
3133 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3134 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3135
3136 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3137
3138 inputHandle->Allocate();
3139 outputHandle->Allocate();
3140 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3141
3142 workload->Execute();
3143
3144 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3145 return result;
3146}
3147
James Conroy6b965822018-11-01 11:33:09 +00003148LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory,
3149 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00003150{
James Conroy6b965822018-11-01 11:33:09 +00003151 const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
3152 const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00003153
James Conroy6b965822018-11-01 11:33:09 +00003154 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01003155 1.0f, 2.0f, 3.0f, 4.0f,
3156 2.0f, 3.0f, 4.0f, 5.0f,
3157 3.0f, 4.0f, 5.0f, 6.0f,
James Conroy6b965822018-11-01 11:33:09 +00003158 4.0f, 5.0f, 6.0f, 7.0f,
3159
3160 7.0f, 6.0f, 5.0f, 4.0f,
3161 6.0f, 5.0f, 4.0f, 3.0f,
3162 5.0f, 4.0f, 3.0f, 2.0f,
3163 4.0f, 3.0f, 2.0f, 1.0f
3164 });
3165
3166 std::vector<float> outputData({
3167 1.0f, 3.0f,
3168 3.0f, 5.0f,
3169
3170 7.0f, 5.0f,
3171 5.0f, 3.0f
3172 });
3173
3174 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
3175 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
3176 {
3177 std::vector<float> tmp(inputData.size());
3178 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
3179 inputData = tmp;
3180
3181 std::vector<float> tmp1(outputData.size());
3182 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
3183 outputData = tmp1;
3184 }
3185
3186 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00003187
telsoa014fcda012018-03-09 14:13:49 +00003188 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00003189 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00003190
3191 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3192 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3193
3194 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003195 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003196 armnn::WorkloadInfo info;
3197 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3198 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3199
3200 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3201
3202 inputHandle->Allocate();
3203 outputHandle->Allocate();
3204 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3205
3206 workload->Execute();
3207
3208 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3209 return result;
3210}
3211
James Conroy6b965822018-11-01 11:33:09 +00003212LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory,
3213 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00003214{
James Conroy6b965822018-11-01 11:33:09 +00003215 const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
3216 const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 2, 3, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00003217
James Conroy6b965822018-11-01 11:33:09 +00003218 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01003219 1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
3220 13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
James Conroy6b965822018-11-01 11:33:09 +00003221 144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
3222
3223 987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
3224 89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
3225 8.0f, 5.0f, 3.0f, 2.0f, 1.0f
3226 });
3227
3228 std::vector<float> outputData({
3229 1.0f, 2.6666f, 6.00f,
3230 78.5f, 179.3333f, 401.00f,
3231
3232 987.0f, 454.6670f, 203.33f,
3233 48.5f, 22.3333f, 10.00f
3234 });
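    // Spot-check of the expected values: scaleH = 3 / 2 = 1.5 and scaleW = 5 / 3. Output texel (1,0) of
    // channel 0 projects to input (y, x) = (1.5, 0), blending rows 1 and 2 with equal weight:
    // (13.0f + 144.0f) / 2 = 78.5f.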
3235
3236 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
3237 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
3238 {
3239 std::vector<float> tmp(inputData.size());
3240 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
3241 inputData = tmp;
3242
3243 std::vector<float> tmp1(outputData.size());
3244 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
3245 outputData = tmp1;
3246 }
3247
3248 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00003249
3250 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00003251 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00003252
3253 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3254 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3255
3256 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003257 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003258 armnn::WorkloadInfo info;
3259 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3260 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3261
3262 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3263
3264 inputHandle->Allocate();
3265 outputHandle->Allocate();
3266 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3267
3268 workload->Execute();
3269
3270 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3271 return result;
3272}
3273
James Conroy6b965822018-11-01 11:33:09 +00003274LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory,
3275 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00003276{
James Conroy6b965822018-11-01 11:33:09 +00003277 const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 3, 2, dataLayout);
3278 const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00003279
James Conroy6b965822018-11-01 11:33:09 +00003280 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01003281 1.0f, 2.0f,
3282 13.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00003283 144.0f, 233.0f,
telsoa014fcda012018-03-09 14:13:49 +00003284
James Conroy6b965822018-11-01 11:33:09 +00003285 233.0f, 144.0f,
3286 21.0f, 13.0f,
3287 2.0f, 1.0f
3288 });
3289
3290 std::vector<float> outputData({
James Conroy074f3712018-10-03 09:32:03 +01003291 1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
3292 13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00003293 144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
3294
3295 233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
3296 21.0f, 17.8f, 14.6f, 13.0f, 13.0f,
3297 2.0f, 1.6f, 1.2f, 1.0f, 1.0f
3298 });
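    // Spot-check of the expected values: scaleW = 2 / 5 = 0.4, so output x = 1 projects to input x = 0.4,
    // giving 1.0f + 0.4f * (2.0f - 1.0f) = 1.4f. Projections beyond the last input texel clamp to the
    // edge value, which is why each row ends with two copies of its final input element.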
3299
3300 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
3301 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
3302 {
3303 std::vector<float> tmp(inputData.size());
3304 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
3305 inputData = tmp;
3306
3307 std::vector<float> tmp1(outputData.size());
3308 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
3309 outputData = tmp1;
3310 }
3311
3312 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
3313
3314 LayerTestResult<float, 4> result(outputTensorInfo);
3315 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00003316
3317 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3318 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3319
3320 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003321 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003322 armnn::WorkloadInfo info;
3323 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3324 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3325
3326 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3327
3328 inputHandle->Allocate();
3329 outputHandle->Allocate();
3330 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3331
3332 workload->Execute();
3333
3334 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3335 return result;
3336}
3337
telsoa014fcda012018-03-09 14:13:49 +00003338LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workloadFactory)
3339{
3340 constexpr unsigned int width = 2;
3341 constexpr unsigned int height = 3;
3342
3343    const armnn::TensorInfo tensorInfo({ height, width },
3344 armnn::DataType::Float32);
3345 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
3346 -10.0f, -5.0f,
3347 0.0f, 5.0f,
3348 10.0f, 10.0f
3349 }));
3350
3351 LayerTestResult<float, 2> ret(tensorInfo);
3352
3353 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
3354
3355 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
3356
3357 armnn::FakeQuantizationQueueDescriptor data;
3358 armnn::WorkloadInfo info;
3359
3360 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
3361 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
3362 float min = -10.f;
3363 float max = 10.f;
3364
3365 data.m_Parameters.m_Min = min;
3366 data.m_Parameters.m_Max = max;
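    // Fake quantization maps the [min, max] = [-10, 10] range onto the 8-bit range [0, 255]:
    // -10.0f is expected to come out as 0.0f and 10.0f as 255.0f (see the expected output below).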
3367
3368 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
3369 armnn::FakeQuantizationQueueDescriptor refData = data;
3370 armnn::WorkloadInfo refInfo = info;
3371 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
3372
3373 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
3374
3375 inputHandle->Allocate();
3376 outputHandle->Allocate();
3377
3378 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
3379
3380 workload->Execute();
3381
3382 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
3383
3384 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
3385 0.0f, 63.0f,
3386 128.0f, 191.0f,
3387 255.0f, 255.0f
3388 }));
3389 return ret;
3390}
3391
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003392namespace
3393{
3394
3395LayerTestResult<float, 4> L2NormalizationTestImpl(armnn::IWorkloadFactory& workloadFactory,
3396 const armnn::TensorShape& inputOutputTensorShape,
3397 const std::vector<float>& inputValues,
3398 const std::vector<float>& expectedOutputValues,
3399 armnn::DataLayout dataLayout)
3400{
3401 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
3402 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
3403
3404 auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputValues));
3405
3406 LayerTestResult<float, 4> result(outputTensorInfo);
3407 result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(expectedOutputValues));
3408
3409 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3410 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3411
3412 armnn::L2NormalizationQueueDescriptor descriptor;
3413 descriptor.m_Parameters.m_DataLayout = dataLayout;
3414 armnn::WorkloadInfo info;
3415
3416 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3417 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3418
3419 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
3420
3421 inputHandle->Allocate();
3422 outputHandle->Allocate();
3423
3424 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
3425
Aron Virginas-Tar60578952018-10-31 11:04:01 +00003426 workloadFactory.Acquire();
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003427 workload->Execute();
Aron Virginas-Tar60578952018-10-31 11:04:01 +00003428 workloadFactory.Release();
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003429
3430 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3431
3432 return result;
3433}
3434
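// Computes the reciprocal L2 norm, 1 / sqrt(sum of squared elements). The tests below build their
// expected outputs by scaling each input element by the reciprocal norm taken across the channels.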
3435float CalcInvL2Norm(std::initializer_list<float> elements)
3436{
3437 const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
3438 [](float acc, float element) { return acc + element * element; });
3439 return 1.0f / sqrtf(reduction);
3440}
3441
3442} // anonymous namespace
3443
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003444template<typename T>
3445LayerTestResult<T, 2> Pad2dTestCommon(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003446{
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003447 const armnn::TensorShape inputShape{ 3, 3 };
3448 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003449
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003450 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
3451 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003452
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003453 std::vector<T> inputValues(
3454 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003455 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003456 // Height (3) x Width (3)
3457 4, 8, 6,
3458 7, 4, 4,
3459 3, 2, 4
3460 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003461
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003462 std::vector<T> expectedOutputValues(
3463 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003464 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003465 0, 0, 0, 0, 0, 0, 0,
3466 0, 0, 0, 0, 0, 0, 0,
3467 0, 0, 4, 8, 6, 0, 0,
3468 0, 0, 7, 4, 4, 0, 0,
3469 0, 0, 3, 2, 4, 0, 0,
3470 0, 0, 0, 0, 0, 0, 0,
3471 0, 0, 0, 0, 0, 0, 0
3472 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003473
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003474 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003475
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003476 LayerTestResult<T, 2> result(outputTensorInfo);
3477 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003478
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003479 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3480 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003481
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003482 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003483
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003484 std::vector<std::pair<unsigned int, unsigned int>> PadList;
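    // Each pair is (padding before, padding after) for one dimension:
    // height 2 + 3 + 2 = 7 and width 2 + 3 + 2 = 7, matching outputShape { 7, 7 }.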
3485 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
3486 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003487
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003488 descriptor.m_Parameters.m_PadList = PadList;
3489 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003490
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003491 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3492 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003493
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003494 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003495
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003496 inputHandle->Allocate();
3497 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003498
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003499 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003500
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003501 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003502
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003503 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003504
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003505 return result;
3506}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003507
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003508template <typename T>
3509LayerTestResult<T, 3> Pad3dTestCommon(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003510{
3511 const armnn::TensorShape inputShape{ 2, 2, 2 };
3512 const armnn::TensorShape outputShape{ 3, 5, 6 };
3513
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003514 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
3515 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003516
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003517 std::vector<T> inputValues(
3518        QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003519 {
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003520 // Channel 0, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003521 0, 4,
3522 2, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003523
3524 // Channel 1, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003525 6, 1,
3526 5, 2
3527 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003528
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003529 std::vector<T> expectedOutputValues(
3530        QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003531 {
3532
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003533 0, 0, 0, 0, 0, 0,
3534 0, 0, 0, 0, 0, 0,
3535 0, 0, 0, 4, 0, 0,
3536 0, 0, 2, 5, 0, 0,
3537 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003538
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003539 0, 0, 0, 0, 0, 0,
3540 0, 0, 0, 0, 0, 0,
3541 0, 0, 6, 1, 0, 0,
3542 0, 0, 5, 2, 0, 0,
3543 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003544
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003545 0, 0, 0, 0, 0, 0,
3546 0, 0, 0, 0, 0, 0,
3547 0, 0, 0, 0, 0, 0,
3548 0, 0, 0, 0, 0, 0,
3549 0, 0, 0, 0, 0, 0
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003550
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003551 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003552
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003553 auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003554
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003555 LayerTestResult<T, 3> result(outputTensorInfo);
3556 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003557
3558 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3559 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3560
3561 armnn::PadQueueDescriptor descriptor;
3562
3563 std::vector<std::pair<unsigned int, unsigned int>> PadList;
3564 PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
3565 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
3566 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
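    // (before, after) padding per dimension: channels 0 + 2 + 1 = 3, height 2 + 2 + 1 = 5,
    // width 2 + 2 + 2 = 6, matching outputShape { 3, 5, 6 }.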
3567
3568 descriptor.m_Parameters.m_PadList = PadList;
3569 armnn::WorkloadInfo info;
3570
3571 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3572 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3573
3574 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
3575
3576 inputHandle->Allocate();
3577 outputHandle->Allocate();
3578
3579 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
3580
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003581 workload->Execute();
3582
3583 CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
3584
3585 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003586}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003587
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003588template <typename T>
3589LayerTestResult<T, 4> Pad4dTestCommon(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003590{
3591 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
3592 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
3593
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003594 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
3595 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003596
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003597 std::vector<T> inputValues(
3598        QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003599 {
3600 // Batch 0, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003601 0, 1,
3602 2, 3,
3603 4, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003604
3605 // Batch 0, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003606 6, 7,
3607 8, 9,
3608 10, 11,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003609
3610 // Batch 1, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003611 12, 13,
3612 14, 15,
3613 16, 17,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003614
3615 // Batch 1, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003616 18, 19,
3617 20, 21,
3618 22, 23
3619 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003620
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003621 std::vector<T> expectedOutputValues(
3622        QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003623 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003624 0, 0, 0, 0,
3625 0, 0, 0, 0,
3626 0, 0, 0, 0,
3627 0, 0, 0, 0,
3628 0, 0, 0, 0,
3629 0, 0, 0, 0,
3630 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003631
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003632 0, 0, 0, 0,
3633 0, 0, 0, 0,
3634 0, 0, 0, 0,
3635 0, 0, 0, 0,
3636 0, 0, 0, 0,
3637 0, 0, 0, 0,
3638 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003639
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003640 0, 0, 0, 0,
3641 0, 0, 0, 0,
3642 0, 0, 0, 0,
3643 0, 0, 0, 0,
3644 0, 0, 0, 0,
3645 0, 0, 0, 0,
3646 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003647
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003648 0, 0, 0, 0,
3649 0, 0, 0, 0,
3650 0, 0, 0, 0,
3651 0, 0, 0, 0,
3652 0, 0, 0, 0,
3653 0, 0, 0, 0,
3654 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003655
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003656 0, 0, 0, 0,
3657 0, 0, 0, 0,
3658 0, 0, 0, 0,
3659 0, 0, 0, 0,
3660 0, 0, 0, 0,
3661 0, 0, 0, 0,
3662 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003663
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003664 0, 0, 0, 0,
3665 0, 0, 0, 0,
3666 0, 0, 0, 0,
3667 0, 0, 0, 0,
3668 0, 0, 0, 0,
3669 0, 0, 0, 0,
3670 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003671
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003672 0, 0, 0, 0,
3673 0, 0, 0, 0,
3674 0, 0, 0, 0,
3675 0, 0, 0, 0,
3676 0, 0, 0, 0,
3677 0, 0, 0, 0,
3678 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003679
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003680 0, 0, 0, 0,
3681 0, 0, 0, 0,
3682 0, 0, 0, 0,
3683 0, 0, 1, 0,
3684 0, 2, 3, 0,
3685 0, 4, 5, 0,
3686 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003687
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003688 0, 0, 0, 0,
3689 0, 0, 0, 0,
3690 0, 0, 0, 0,
3691 0, 6, 7, 0,
3692 0, 8, 9, 0,
3693 0, 10, 11, 0,
3694 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003695
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003696 0, 0, 0, 0,
3697 0, 0, 0, 0,
3698 0, 0, 0, 0,
3699 0, 0, 0, 0,
3700 0, 0, 0, 0,
3701 0, 0, 0, 0,
3702 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003703
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003704 0, 0, 0, 0,
3705 0, 0, 0, 0,
3706 0, 0, 0, 0,
3707 0, 0, 0, 0,
3708 0, 0, 0, 0,
3709 0, 0, 0, 0,
3710 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003711
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003712 0, 0, 0, 0,
3713 0, 0, 0, 0,
3714 0, 0, 0, 0,
3715 0, 0, 0, 0,
3716 0, 0, 0, 0,
3717 0, 0, 0, 0,
3718 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003719
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003720 0, 0, 0, 0,
3721 0, 0, 0, 0,
3722 0, 0, 0, 0,
3723 0, 12, 13, 0,
3724 0, 14, 15, 0,
3725 0, 16, 17, 0,
3726 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003727
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003728 0, 0, 0, 0,
3729 0, 0, 0, 0,
3730 0, 0, 0, 0,
3731 0, 18, 19, 0,
3732 0, 20, 21, 0,
3733 0, 22, 23, 0,
3734 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003735
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003736 0, 0, 0, 0,
3737 0, 0, 0, 0,
3738 0, 0, 0, 0,
3739 0, 0, 0, 0,
3740 0, 0, 0, 0,
3741 0, 0, 0, 0,
3742 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003743
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003744 0, 0, 0, 0,
3745 0, 0, 0, 0,
3746 0, 0, 0, 0,
3747 0, 0, 0, 0,
3748 0, 0, 0, 0,
3749 0, 0, 0, 0,
3750 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003751
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003752 0, 0, 0, 0,
3753 0, 0, 0, 0,
3754 0, 0, 0, 0,
3755 0, 0, 0, 0,
3756 0, 0, 0, 0,
3757 0, 0, 0, 0,
3758 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003759
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003760 0, 0, 0, 0,
3761 0, 0, 0, 0,
3762 0, 0, 0, 0,
3763 0, 0, 0, 0,
3764 0, 0, 0, 0,
3765 0, 0, 0, 0,
3766 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003767
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003768 0, 0, 0, 0,
3769 0, 0, 0, 0,
3770 0, 0, 0, 0,
3771 0, 0, 0, 0,
3772 0, 0, 0, 0,
3773 0, 0, 0, 0,
3774 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003775
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003776 0, 0, 0, 0,
3777 0, 0, 0, 0,
3778 0, 0, 0, 0,
3779 0, 0, 0, 0,
3780 0, 0, 0, 0,
3781 0, 0, 0, 0,
3782 0, 0, 0, 0
3783 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003784
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003785 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003786
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003787 LayerTestResult<T, 4> result(outputTensorInfo);
3788 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003789
3790 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3791 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3792
3793 armnn::PadQueueDescriptor descriptor;
3794
3795 std::vector<std::pair<unsigned int, unsigned int>> PadList;
3796 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
3797 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
3798 PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
3799 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
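    // (before, after) padding per dimension: batches 1 + 2 + 1 = 4, channels 2 + 2 + 1 = 5,
    // height 3 + 3 + 1 = 7, width 1 + 2 + 1 = 4, matching outputShape { 4, 5, 7, 4 }.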
3800
3801 descriptor.m_Parameters.m_PadList = PadList;
3802 armnn::WorkloadInfo info;
3803
3804 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3805 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3806
3807 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
3808
3809 inputHandle->Allocate();
3810 outputHandle->Allocate();
3811
3812 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
3813
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003814 workload->Execute();
3815
3816 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3817
3818 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003819}
3820
3821LayerTestResult<uint8_t, 2> PadUint82dTest(armnn::IWorkloadFactory& workloadFactory)
3822{
3823 return Pad2dTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
3824}
3825
3826LayerTestResult<uint8_t, 3> PadUint83dTest(armnn::IWorkloadFactory& workloadFactory)
3827{
3828 return Pad3dTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
3829}
3830
3831LayerTestResult<uint8_t, 4> PadUint84dTest(armnn::IWorkloadFactory& workloadFactory)
3832{
3833 return Pad4dTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
3834}
3835
3836LayerTestResult<float, 2> PadFloat322dTest(armnn::IWorkloadFactory& workloadFactory)
3837{
3838 return Pad2dTestCommon<float>(workloadFactory, 0.0f, 0);
3839}
3840
3841LayerTestResult<float, 3> PadFloat323dTest(armnn::IWorkloadFactory& workloadFactory)
3842{
3843 return Pad3dTestCommon<float>(workloadFactory, 0.0f, 0);
3844}
3845
3846LayerTestResult<float, 4> PadFloat324dTest(armnn::IWorkloadFactory& workloadFactory)
3847{
3848 return Pad4dTestCommon<float>(workloadFactory, 0.0f, 0);
3849}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003850
telsoa014fcda012018-03-09 14:13:49 +00003851LayerTestResult<float, 4> L2Normalization1dTest(armnn::IWorkloadFactory& workloadFactory)
3852{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003853 // Width: 1
3854 // Height: 1
3855 // Channels: 10
3856 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00003857
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003858 const armnn::TensorShape inputOutputShape{ 1, 10, 1, 1 };
3859 std::vector<float> inputValues
3860 {
3861 // Batch 0, Channel 0, Height (1) x Width (1)
3862 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00003863
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003864 // Batch 0, Channel 1, Height (1) x Width (1)
3865 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00003866
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003867 // Batch 0, Channel 2, Height (1) x Width (1)
3868 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00003869
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003870 // Batch 0, Channel 3, Height (1) x Width (1)
3871 4.0f,
3872
3873 // Batch 0, Channel 4, Height (1) x Width (1)
3874 5.0f,
3875
3876 // Batch 0, Channel 5, Height (1) x Width (1)
3877 6.0f,
3878
3879 // Batch 0, Channel 6, Height (1) x Width (1)
3880 7.0f,
3881
3882 // Batch 0, Channel 7, Height (1) x Width (1)
3883 8.0f,
3884
3885 // Batch 0, Channel 8, Height (1) x Width (1)
3886 9.0f,
3887
3888 // Batch 0, Channel 9, Height (1) x Width (1)
3889 10.0f
3890 };
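    // 1 / sqrt(1^2 + 2^2 + ... + 10^2) = 1 / sqrt(385)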
telsoa014fcda012018-03-09 14:13:49 +00003891 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003892 std::vector<float> expectedOutputValues
3893 {
3894        // Batch 0, Channels 0-9, Height (1) x Width (1)
telsoa014fcda012018-03-09 14:13:49 +00003895 1.0f * approxInvL2Norm,
3896 2.0f * approxInvL2Norm,
3897 3.0f * approxInvL2Norm,
3898 4.0f * approxInvL2Norm,
3899 5.0f * approxInvL2Norm,
3900 6.0f * approxInvL2Norm,
3901 7.0f * approxInvL2Norm,
3902 8.0f * approxInvL2Norm,
3903 9.0f * approxInvL2Norm,
3904 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003905 };
telsoa014fcda012018-03-09 14:13:49 +00003906
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003907 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
3908 inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
telsoa014fcda012018-03-09 14:13:49 +00003909}
3910
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003911LayerTestResult<float, 4> L2Normalization1dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00003912{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003913 // Width: 1
3914 // Height: 1
3915 // Channels: 10
3916 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00003917
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003918 const armnn::TensorShape inputOutputShape{ 1, 1, 1, 10 };
3919 std::vector<float> inputValues
3920 {
3921 // Batch 0, Height 0, Width (1) x Channel (10)
3922 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f
3923 };
3924 const float approxInvL2Norm = 0.050964719f;
3925 std::vector<float> expectedOutputValues
3926 {
3927 // Batch 0, Height 0, Width (1) x Channel (10)
3928 1.0f * approxInvL2Norm,
3929 2.0f * approxInvL2Norm,
3930 3.0f * approxInvL2Norm,
3931 4.0f * approxInvL2Norm,
3932 5.0f * approxInvL2Norm,
3933 6.0f * approxInvL2Norm,
3934 7.0f * approxInvL2Norm,
3935 8.0f * approxInvL2Norm,
3936 9.0f * approxInvL2Norm,
3937 10.0f * approxInvL2Norm
3938 };
3939
3940 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
3941 inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00003942}
3943
3944LayerTestResult<float, 4> L2Normalization2dTest(armnn::IWorkloadFactory& workloadFactory)
3945{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003946 // Width: 5
3947 // Height: 1
3948 // Channels: 2
3949 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00003950
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003951 const armnn::TensorShape inputOutputShape{ 1, 2, 1, 5 };
3952 std::vector<float> inputValues
3953 {
3954 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00003955 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00003956
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003957 // Batch 0, Channel 1, Height (1) x Width (5)
3958 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
3959 };
3960 std::vector<float> expectedOutputValues
3961 {
3962 // Batch 0, Channel 0, Height (1) x Width (5)
3963 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
3964 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
3965 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
3966 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00003967 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
3968
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003969 // Batch 0, Channel 1, Height (1) x Width (5)
3970 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
3971 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
3972 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
3973 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00003974 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003975 };
telsoa014fcda012018-03-09 14:13:49 +00003976
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003977 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
3978 inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
3979}
telsoa014fcda012018-03-09 14:13:49 +00003980
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003981LayerTestResult<float, 4> L2Normalization2dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
3982{
3983 // Width: 5
3984 // Height: 1
3985 // Channels: 2
3986 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00003987
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003988 const armnn::TensorShape inputOutputShape{ 1, 1, 5, 2 };
3989 std::vector<float> inputValues
3990 {
3991 // Batch 0, Height 0, Width (5) x Channel (2)
3992 1.0f, 2.0f,
3993 3.0f, 4.0f,
3994 5.0f, 6.0f,
3995 7.0f, 8.0f,
3996 9.0f, 10.0f
3997 };
3998 std::vector<float> expectedOutputValues
3999 {
4000 // Batch 0, Height 0, Width (5) x Channel (2)
4001 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
4002 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
4003 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
4004 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
4005 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
4006 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
4007 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
4008 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
4009 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
4010 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
4011 };
telsoa014fcda012018-03-09 14:13:49 +00004012
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004013 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4014 inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00004015}
4016
4017LayerTestResult<float, 4> L2Normalization3dTest(armnn::IWorkloadFactory& workloadFactory)
4018{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004019 // Width: 3
4020 // Height: 4
4021 // Channels: 2
4022 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00004023
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004024 const armnn::TensorShape inputOutputShape{ 1, 2, 4, 3 };
4025 std::vector<float> inputValues
4026 {
4027 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004028 119.0f, 21.0f, 150.0f,
4029 149.0f, 32.0f, 179.0f,
4030 15.0f, 227.0f, 141.0f,
4031 147.0f, 199.0f, 220.0f,
4032
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004033 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004034 110.0f, 140.0f, 73.0f,
4035 211.0f, 212.0f, 89.0f,
4036 24.0f, 138.0f, 188.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004037 162.0f, 12.0f, 161.0f
4038 };
4039 std::vector<float> expectedOutputValues
4040 {
4041 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004042 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4043 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4044 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4045 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4046 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4047 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4048 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4049 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4050 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4051 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4052 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
4053 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
4054
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004055 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004056 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4057 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4058 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4059 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4060 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4061 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4062 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4063 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4064 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4065 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4066 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004067 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
4068 };
telsoa014fcda012018-03-09 14:13:49 +00004069
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004070 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4071 inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
4072}
telsoa014fcda012018-03-09 14:13:49 +00004073
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004074LayerTestResult<float, 4> L2Normalization3dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
4075{
4076 // Width: 3
4077 // Height: 4
4078 // Channels: 2
4079 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00004080
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004081 const armnn::TensorShape inputOutputShape{ 1, 4, 3, 2 };
4082 std::vector<float> inputValues
4083 {
4084 // Batch 0, Height 0, Width (3) x Channel (2)
4085 119.0f, 110.0f,
4086 21.0f, 140.0f,
4087 150.0f, 73.0f,
telsoa014fcda012018-03-09 14:13:49 +00004088
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004089 // Batch 0, Height 1, Width (3) x Channel (2)
4090 149.0f, 211.0f,
4091 32.0f, 212.0f,
4092 179.0f, 89.0f,
telsoa014fcda012018-03-09 14:13:49 +00004093
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004094 // Batch 0, Height 2, Width (3) x Channel (2)
4095 15.0f, 24.0f,
4096 227.0f, 138.0f,
4097 141.0f, 188.0f,
telsoa014fcda012018-03-09 14:13:49 +00004098
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004099 // Batch 0, Height 3, Width (3) x Channel (2)
4100 147.0f, 162.0f,
4101 199.0f, 12.0f,
4102 220.0f, 161.0f
4103 };
4104 std::vector<float> expectedOutputValues
4105 {
4106 // Batch 0, Height 0, Width (3) x Channel (2)
4107 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4108 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4109 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4110 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4111 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4112 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4113
4114 // Batch 0, Height 1, Width (3) x Channel (2)
4115 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4116 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4117 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4118 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4119 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4120 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4121
4122 // Batch 0, Height 2, Width (3) x Channel (2)
4123 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4124 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4125 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4126 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4127 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4128 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4129
4130 // Batch 0, Height 3, Width (3) x Channel (2)
4131 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4132 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4133 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
4134 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
4135 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
4136 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
4137 };
4138
4139 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4140 inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00004141}
4142
4143LayerTestResult<float, 4> L2Normalization4dTest(armnn::IWorkloadFactory& workloadFactory)
4144{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004145 // Width: 3
4146 // Height: 4
4147 // Channels: 3
4148 // BatchSize: 2
telsoa014fcda012018-03-09 14:13:49 +00004149
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004150 const armnn::TensorShape inputOutputShape{ 2, 3, 4, 3 };
4151 std::vector<float> inputValues
4152 {
4153 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004154 235.0f, 46.0f, 178.0f,
4155 100.0f, 123.0f, 19.0f,
4156 172.0f, 74.0f, 250.0f,
4157 6.0f, 195.0f, 80.0f,
4158
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004159 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004160 113.0f, 95.0f, 202.0f,
4161 77.0f, 114.0f, 71.0f,
4162 122.0f, 246.0f, 166.0f,
4163 82.0f, 28.0f, 37.0f,
4164
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004165 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004166 56.0f, 170.0f, 162.0f,
4167 194.0f, 89.0f, 254.0f,
4168 12.0f, 209.0f, 200.0f,
4169 1.0f, 64.0f, 54.0f,
4170
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004171 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004172 67.0f, 90.0f, 49.0f,
4173 7.0f, 163.0f, 18.0f,
4174 25.0f, 117.0f, 103.0f,
4175 247.0f, 59.0f, 189.0f,
4176
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004177 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004178 239.0f, 104.0f, 199.0f,
4179 17.0f, 124.0f, 153.0f,
4180 222.0f, 217.0f, 75.0f,
4181 32.0f, 126.0f, 21.0f,
4182
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004183 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004184 97.0f, 145.0f, 215.0f,
4185 115.0f, 116.0f, 238.0f,
4186 226.0f, 16.0f, 132.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004187 92.0f, 125.0f, 88.0f
4188 };
4189 std::vector<float> expectedOutputValues
4190 {
4191 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004192 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4193 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4194        178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
4195 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4196 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4197 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4198 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4199 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4200 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4201 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4202 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4203 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4204
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004205 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004206 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4207 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4208        202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
4209 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4210 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4211 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4212 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4213 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4214 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4215 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4216 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4217 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4218
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004219 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004220 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4221 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4222        162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
4223 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4224 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4225 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4226 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4227 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4228 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4229 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4230 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4231 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4232
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004233 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004234 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4235 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4236 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4237 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4238 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4239 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4240 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4241 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4242 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4243 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4244 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4245 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
4246
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004247 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004248 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4249 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4250 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4251 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4252 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4253 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),

        // Batch 1, Channel 2, Height (4) x Width (3)
        97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> L2Normalization4dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Width: 3
    // Height: 4
    // Channels: 3
    // BatchSize: 2

    const armnn::TensorShape inputOutputShape{ 2, 4, 3, 3 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (3) x Channel (3)
        235.0f, 113.0f, 56.0f,
        46.0f, 95.0f, 170.0f,
        178.0f, 202.0f, 162.0f,

        // Batch 0, Height 1, Width (3) x Channel (3)
        100.0f, 77.0f, 194.0f,
        123.0f, 114.0f, 89.0f,
        19.0f, 71.0f, 254.0f,

        // Batch 0, Height 2, Width (3) x Channel (3)
        172.0f, 122.0f, 12.0f,
        74.0f, 246.0f, 209.0f,
        250.0f, 166.0f, 200.0f,

        // Batch 0, Height 3, Width (3) x Channel (3)
        6.0f, 82.0f, 1.0f,
        195.0f, 28.0f, 64.0f,
        80.0f, 37.0f, 54.0f,

        // Batch 1, Height 0, Width (3) x Channel (3)
        67.0f, 239.0f, 97.0f,
        90.0f, 104.0f, 145.0f,
        49.0f, 199.0f, 215.0f,

        // Batch 1, Height 1, Width (3) x Channel (3)
        7.0f, 17.0f, 115.0f,
        163.0f, 124.0f, 116.0f,
        18.0f, 153.0f, 238.0f,

        // Batch 1, Height 2, Width (3) x Channel (3)
        25.0f, 222.0f, 226.0f,
        117.0f, 217.0f, 16.0f,
        103.0f, 75.0f, 132.0f,

        // Batch 1, Height 3, Width (3) x Channel (3)
        247.0f, 32.0f, 92.0f,
        59.0f, 126.0f, 125.0f,
        189.0f, 21.0f, 88.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (3) x Channel (3)
        235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),

        // Batch 0, Height 1, Width (3) x Channel (3)
        100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),

        // Batch 0, Height 2, Width (3) x Channel (3)
        172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),

        // Batch 0, Height 3, Width (3) x Channel (3)
        6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
        37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
        54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 1, Height 0, Width (3) x Channel (3)
        67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),

        // Batch 1, Height 1, Width (3) x Channel (3)
        7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),

        // Batch 1, Height 2, Width (3) x Channel (3)
        25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),

        // Batch 1, Height 3, Width (3) x Channel (3)
        247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
        21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
        88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
}
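
// The expressions above assume CalcInvL2Norm (defined earlier in this file) returns the
// reciprocal of the L2 norm of the values sampled across the channel dimension. A minimal
// sketch of that arithmetic, shown only to make the expected values traceable (the name
// CalcInvL2NormSketch is hypothetical, not part of the test suite):
//
//     float CalcInvL2NormSketch(std::initializer_list<float> elements)
//     {
//         float sumOfSquares = 0.0f;
//         for (float element : elements)
//         {
//             sumOfSquares += element * element; // accumulate squares across channels
//         }
//         return 1.0f / std::sqrt(sumOfSquares);
//     }
//
// For example, the first NHWC pixel above is scaled by
// 1 / sqrt(235^2 + 113^2 + 56^2) ~= 1 / 266.7, giving roughly 0.881 for its first element.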

template <typename T>
LayerTestResult<T, 4> ConstantTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                       float qScale,
                                       int32_t qOffset)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::GetDataType<T>());

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f,
    })));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;

    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);

    outputHandle->Allocate();

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
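
// A note on the quantized path above: when T is uint8_t, QuantizedVector is assumed to map each
// float value v to a storage value with armnn's usual affine scheme, q = round(v / qScale) + qOffset,
// clamped to [0, 255]. With the parameters ConstantTestUint8 passes below (qScale = 1.0f,
// qOffset = 0) the values are stored verbatim, e.g. 235.0f -> round(235.0 / 1.0) + 0 = 235.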

LayerTestResult<float, 4> ConstantTest(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> ConstantTestUint8(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<uint8_t, 3> MergerUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the merger operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
        {
            1, 2, 3,
            4, 5, 6,
            7, 8, 9,
            10, 11, 12,
            13, 14, 15,
            16, 17, 18,

            19, 20, 21,
            22, 23, 24,
            25, 26, 27,
            28, 29, 30,
            31, 32, 33,
            34, 35, 36,

            37, 38, 39,
            40, 41, 42,
            43, 44, 45,
            46, 47, 48,
            49, 50, 51,
            52, 53, 54,
        })
    );

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
        {
            1, 2, 3,
            4, 5, 6,
            7, 8, 9,
            10, 11, 12,
            13, 14, 15,
            16, 17, 18,

            19, 20, 21,
            22, 23, 24,
            25, 26, 27,
            28, 29, 30,
            31, 32, 33,
            34, 35, 36,
        })
    );

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
        {
            37, 38, 39,
            40, 41, 42,
            43, 44, 45,
            46, 47, 48,
            49, 50, 51,
            52, 53, 54,
        })
    );

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; // Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; // Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
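
// The two view origins above make the merger a concatenation along the channel dimension:
// input1 (2 channels) is written at channel 0 and input2 (1 channel) at channel 2 of the
// 3-channel output. A sketch of the resulting layout, assuming the { C, H, W } ordering used
// by the tensor infos above:
//
//     output channel 0: input1 channel 0   (view origin { 0, 0, 0 })
//     output channel 1: input1 channel 1
//     output channel 2: input2 channel 0   (view origin { 2, 0, 0 })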

LayerTestResult<uint8_t, 4> AdditionUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 1;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    const float scale = 7.0f;
    const int32_t offset = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    const unsigned int shape[] = { batchSize, channels, height, width };
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);

    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    // See dequantized values to the right.
    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
    {
        63, 35, 77, 70, 56, 112,   //  420, 224,  518,  469,  371, 763
        203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
    }));

    // See dequantized values to the right.
    auto input2 = MakeTensor<uint8_t, 4>(inputTensorInfo2, std::vector<uint8_t>(
    {
        21, 7, 175, 231, 175, 210, // 126,  28, 1204, 1596, 1204, 1449
        126, 161, 63, 21, 105, 126 // 861, 1106, 420,  126,  714, 861
    }));

    // See dequantized values to the right.
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>(
    {
        81, 39, 249, 255, 228, 255,  // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
        255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
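
// The dequantized values in the comments above follow the affine mapping v = scale * (q - offset).
// A worked example for the first element of AdditionUint8Test, with scale = 7 and offset = 3:
//
//     input0: 7 * (63 - 3) = 420
//     input1: 7 * (21 - 3) = 126
//     sum:    420 + 126    = 546
//     output: 546 / 7 + 3  = 81, the first expected value.
//
// The largest value this output encoding can represent is 7 * (255 - 3) = 1764, so sums such as
// 2065 and 2212 clamp to 255, as noted in the expected data.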

namespace
{
LayerTestResult<uint8_t, 4> MultiplicationUint8TestHelper(armnn::IWorkloadFactory& workloadFactory,
                                                          const unsigned int shape0[4],
                                                          const std::vector<uint8_t>& values0,
                                                          float scale0,
                                                          int32_t offset0,
                                                          const unsigned int shape1[4],
                                                          const std::vector<uint8_t>& values1,
                                                          float scale1,
                                                          int32_t offset1,
                                                          const unsigned int outShape[4],
                                                          const std::vector<uint8_t>& outValues,
                                                          float outScale,
                                                          int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1(4, shape1, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo outputTensorInfo(4, outShape, armnn::DataType::QuantisedAsymm8);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<uint8_t, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, values1);

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<uint8_t, 4> MultiplicationUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 1;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;
    const unsigned int shape[] = { batchSize, channels, height, width };

    // See dequantized values to the right.
    std::vector<uint8_t> input0({
        62, 37, 3, 172, 13, 111, // 244, 144,   8, 684, 48, 440,
        188, 20, 73, 31, 23, 31  // 748,  76, 288, 120, 88, 120
    });

    // See dequantized values to the right.
    std::vector<uint8_t> input1({
        126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
        48, 115, 151, 79, 78, 97      // 150, 351, 459, 243, 240, 297
    });

    // See dequantized values to the right.
    std::vector<uint8_t> output(
    {
        64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
        77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
    });

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape,
                                         input0,
                                         4.0f,
                                         1,
                                         shape,
                                         input1,
                                         3.0f,
                                         -2,
                                         shape,
                                         output,
                                         1366.255f, // Scale/offset chosen to have output values out of range.
                                         -5);
}
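
// Worked example for the first element of MultiplicationUint8Test above, assuming the usual
// dequantize-multiply-requantize flow (v = scale * (q - offset), q = round(v / scale) + offset):
//
//     input0:  4.0 * (62 - 1)       = 244
//     input1:  3.0 * (126 - (-2))   = 384
//     product: 244 * 384            = 93696
//     output:  93696 / 1366.255 - 5 = 63.58... -> 64, the first expected value.
//
// The deliberately large output scale keeps most products representable while still forcing a
// few of them (e.g. 379620) past the top of the uint8 range.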

LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<uint8_t> input0({
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12
    });

    std::vector<uint8_t> input1({ 2 });

    std::vector<uint8_t> output({
        2, 4, 6, 8, 10, 12,
        14, 16, 18, 20, 22, 24
    });

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape0,
                                         input0,
                                         1.0f,
                                         0,
                                         shape1,
                                         input1,
                                         1.0f,
                                         0,
                                         shape0,
                                         output,
                                         1.0f,
                                         0);
}

LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 3 };

    std::vector<uint8_t> input0({
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12
    });

    std::vector<uint8_t> input1({ 1, 2, 3 });

    std::vector<uint8_t> output({
        1, 4, 9, 4, 10, 18,
        7, 16, 27, 10, 22, 36
    });

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape0,
                                         input0,
                                         1.0f,
                                         0,
                                         shape1,
                                         input1,
                                         1.0f,
                                         0,
                                         shape0,
                                         output,
                                         1.0f,
                                         0);
}
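
// In the two broadcast tests above the smaller input is stretched to the larger shape:
// a { 1, 1, 1, 1 } operand multiplies every element, while a { 1, 1, 1, 3 } operand is repeated
// along the width dimension, so each output element is input0[..., w] * input1[w]
// (for example 3 * 3 = 9 and 9 * 3 = 27 in the third column). With unit scales and zero
// offsets the quantized values multiply directly.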

namespace
{
template <typename T>
LayerTestResult<T, 4> SubtractionTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                            const unsigned int shape0[4],
                                            const std::vector<T>& values0,
                                            float scale0,
                                            int32_t offset0,
                                            const unsigned int shape1[4],
                                            const std::vector<T>& values1,
                                            float scale1,
                                            int32_t offset1,
                                            const unsigned int outShape[4],
                                            const std::vector<T>& outValues,
                                            float outScale,
                                            int32_t outOffset)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<uint8_t, 4> SubtractionUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 2 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 1, 2, 1, 2 });
    std::vector<uint8_t> output({ 3, 3, 5, 5 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 0.5f, 2,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}
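
// Worked example for SubtractionUint8Test above, dequantizing each operand with
// v = scale * (q - offset) before subtracting:
//
//     input0: 0.5 * ({10, 12, 14, 16} - 2) = {4, 5, 6, 7}
//     input1: 1.0 * ({1, 2, 1, 2} - 0)     = {1, 2, 1, 2}
//     difference:                            {3, 3, 5, 5}
//
// which requantizes unchanged under the output's unit scale and zero offset. The broadcast
// variants below follow the same arithmetic; note the 1-element uint8 test also adds its
// non-zero output offset of 3 when requantizing.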

LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 2 });
    std::vector<uint8_t> output({ 5, 6, 7, 8 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 0.5f, 2,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 3);
}

LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 1 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 2, 1 });
    std::vector<uint8_t> output({ 8, 11, 12, 15 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 2 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 1, -1, 0, 2 });
    std::vector<float> output({ 0, 3, 3, 2 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 10 });
    std::vector<float> output({ -9, -8, -7, -6 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 2 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 10, -5 });
    std::vector<float> output({ -9, 7, -7, 9 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.1567f);
    inputTensorInfo.SetQuantizationOffset(1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.1567f);
    outputTensorInfo.SetQuantizationOffset(1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 255,
        200, 250
    }));

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
    // the centre).
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
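
// A sketch of the coordinate mapping the resize tests in this file rely on. An output texel
// (outX, outY) is projected back into the input by scaling with the size ratio, with no
// half-texel shift (the top-left convention described in the comment above):
//
//     float inX = outX * (static_cast<float>(inputWidth)  / outputWidth);
//     float inY = outY * (static_cast<float>(inputHeight) / outputHeight);
//
// For the 2x2 -> 1x1 case above, output (0, 0) maps to input (0, 0) exactly, which is why the
// expected result is the raw top-left value rather than any blend of the four inputs.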

LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(3.141592f);
    inputTensorInfo.SetQuantizationOffset(3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(3.141592f);
    outputTensorInfo.SetQuantizationOffset(3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3,
        3, 5
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 2;
    constexpr unsigned int outputHeight = 1;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, // 3.0, 4.5, 6.0
        5, 8, 13 // 9.0, 13.5, 21.0
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3 // 3.0, 5.25
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
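
// Worked example for ResizeBilinearMinUint8Test above. The 3x2 -> 2x1 resize gives scale
// factors of 1.5 in width and 2.0 in height, so output (0, 0) projects to input (0.0, 0.0),
// i.e. the dequantized value 3.0, while output (1, 0) projects to input (1.5, 0.0), halfway
// between 4.5 and 6.0:
//
//     0.5f * 4.5f + 0.5f * 6.0f = 5.25f
//
// Requantizing with scale 1.5 and offset -1 then yields round(5.25 / 1.5) - 1 = 3, matching
// the expected output (and its "3.0, 5.25" comment) to within quantization error.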

LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 3;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 5;
    constexpr unsigned int outputHeight = 3;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.010765f);
    inputTensorInfo.SetQuantizationOffset(7);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.010132f);
    outputTensorInfo.SetQuantizationOffset(-18);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        24, 228,  // 0.183005, 2.379065,
        105, 128, // 1.05497, 1.302565
        230, 71   // 2.400595, 0.68896
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        0, 87, 173, 217, 217,  // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
        86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
        219, 151, 84, 50, 50   // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> BatchNormTest(armnn::IWorkloadFactory& workloadFactory)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<float>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
                                    0.f, 0, armnn::DataLayout::NCHW);
}
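
// The expected values above follow the standard batch normalization transform,
//
//     out = gamma * (in - mean) / sqrt(variance + epsilon) + beta,
//
// with the per-channel constants supplied inside BatchNormTestImpl. Channel 0 passes through
// unchanged, while channel 1 is remapped so that, for example, 1.f -> 3.f, 4.f -> 4.f and
// -2.f -> 2.f; that is consistent with a mean of 1, an effective scale of 1/3 and a beta of 3,
// though the exact constants live in the impl (an assumption stated here only to make the
// numbers traceable). The NHWC variants below interleave the same data per pixel instead of
// per channel plane.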

LayerTestResult<float, 4> BatchNormNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<float>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
                                    0.f, 0, armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> BatchNormUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<uint8_t>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
                                      1.f/20.f, 50, armnn::DataLayout::NCHW);
}

LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<uint8_t>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
                                      1.f/20.f, 50, armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> ConstantUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<uint8_t>(workloadFactory, 2e-6f, 1);
}

LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation1dTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim2TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5536}
5537
5538LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
5539{
5540 return Concatenation3dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5541}
5542
5543LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
5544{
5545 return Concatenation3dDim2DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5546}
5547
5548LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
5549 bool forceNoPadding)
5550{
5551 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
5552}
5553
5554LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory& workloadFactory,
5555 bool forceNoPadding)
5556{
5557 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<uint8_t>(workloadFactory, forceNoPadding, 3.0f, -5);
5558}
5559
5560LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory& workloadFactory,
5561 bool forceNoPadding)
5562{
5563 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<float>(workloadFactory, forceNoPadding);
5564}
5565
5566LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory& workloadFactory,
5567 bool forceNoPadding)
5568{
5569 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<uint8_t>(workloadFactory, forceNoPadding, 0.1f, 128);
5570}
5571
James Conroy45a9b772018-10-31 11:47:53 +00005572LayerTestResult<float, 4> SimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory,
5573 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005574{
James Conroy45a9b772018-10-31 11:47:53 +00005575 return SimpleMaxPooling2dTestCommon<float>(workloadFactory, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00005576}
5577
James Conroy45a9b772018-10-31 11:47:53 +00005578LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
5579 const armnn::DataLayoutIndexed& dataLayout)
Francis Murtagh043d0d02018-10-05 14:08:48 +01005580{
James Conroy45a9b772018-10-31 11:47:53 +00005581 return SimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, dataLayout);
Francis Murtagh043d0d02018-10-05 14:08:48 +01005582}
5583
James Conroy45a9b772018-10-31 11:47:53 +00005584LayerTestResult<float, 4> SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory,
5585 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005586{
James Conroy45a9b772018-10-31 11:47:53 +00005587 return SimpleAveragePooling2dTestCommon<float>(workloadFactory, dataLayout);
James Conroy69482272018-10-19 10:41:35 +01005588}
5589
James Conroy45a9b772018-10-31 11:47:53 +00005590LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
5591 const armnn::DataLayoutIndexed& dataLayout)
James Conroy69482272018-10-19 10:41:35 +01005592{
James Conroy45a9b772018-10-31 11:47:53 +00005593 return SimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00005594}
5595
surmeh01bceff2f2018-03-29 16:29:27 +01005596LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
5597 bool forceNoPadding)
5598{
5599 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
5600}
5601
telsoa014fcda012018-03-09 14:13:49 +00005602LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
5603{
5604 return LargeTensorsAveragePooling2dTestCommon<float>(workloadFactory);
5605}
5606
5607LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
5608{
5609 return LargeTensorsAveragePooling2dTestCommon<uint8_t>(workloadFactory, 0.5, -1);
5610}
5611
James Conroy45a9b772018-10-31 11:47:53 +00005612LayerTestResult<float, 4> SimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory,
5613 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005614{
James Conroy45a9b772018-10-31 11:47:53 +00005615 return SimpleL2Pooling2dTestCommon<float>(workloadFactory, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00005616}
5617
James Conroy45a9b772018-10-31 11:47:53 +00005618LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
5619 const armnn::DataLayoutIndexed& dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005620{
James Conroy45a9b772018-10-31 11:47:53 +00005621 return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00005622}
5623
5624LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory& workloadFactory)
5625{
5626 return L2Pooling2dSize3Stride1TestCommon<float>(workloadFactory);
5627}
5628
5629LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5630{
5631 return L2Pooling2dSize3Stride1TestCommon<uint8_t>(workloadFactory);
5632}
5633
5634LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory& workloadFactory)
5635{
5636 return L2Pooling2dSize3Stride3TestCommon<float>(workloadFactory);
5637}
5638
5639LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5640{
5641 return L2Pooling2dSize3Stride3TestCommon<uint8_t>(workloadFactory);
5642}
5643
5644LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory& workloadFactory)
5645{
5646 return L2Pooling2dSize3Stride4TestCommon<float>(workloadFactory);
5647}
5648
5649LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5650{
5651 return L2Pooling2dSize3Stride4TestCommon<uint8_t>(workloadFactory);
5652}
5653
5654LayerTestResult<float, 4> L2Pooling2dSize7Test(armnn::IWorkloadFactory& workloadFactory)
5655{
5656 return L2Pooling2dSize7TestCommon<float>(workloadFactory);
5657}
5658
5659LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5660{
5661 return L2Pooling2dSize7TestCommon<uint8_t>(workloadFactory);
5662}
5663
5664LayerTestResult<float, 4> L2Pooling2dSize9Test(armnn::IWorkloadFactory& workloadFactory)
5665{
5666 return L2Pooling2dSize9TestCommon<float>(workloadFactory);
5667}
5668
5669LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5670{
5671 return L2Pooling2dSize9TestCommon<uint8_t>(workloadFactory);
5672}
5673
5674LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
5675{
5676 return AsymmetricNonSquarePooling2dTestCommon<float>(workloadFactory);
5677}
5678
5679LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
5680{
5681 return AsymmetricNonSquarePooling2dTestCommon<uint8_t>(workloadFactory);
5682}
5683
5684LayerTestResult<float, 4> ComparePooling2dTest(armnn::IWorkloadFactory& workloadFactory,
5685 armnn::IWorkloadFactory& refWorkloadFactory,
5686 armnn::PoolingAlgorithm poolingType)
5687{
5688 return ComparePooling2dTestCommon<float>(workloadFactory, refWorkloadFactory, poolingType);
5689}
5690
5691LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
5692 armnn::IWorkloadFactory& refWorkloadFactory,
5693 armnn::PoolingAlgorithm poolingType)
5694{
5695 return ComparePooling2dTestCommon<uint8_t>(workloadFactory, refWorkloadFactory, poolingType, 0.1f, 128);
5696}
5697
5698LayerTestResult<float, 2> FullyConnectedLargeTest(armnn::IWorkloadFactory& workloadFactory,
5699 bool transposeWeights)
5700{
5701 return FullyConnectedLargeTestCommon<float>(workloadFactory, transposeWeights);
5702}
5703
5704LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory)
5705{
5706 return IgnorePaddingSimpleMaxPooling2dTestCommon<float>(workloadFactory);
5707}
5708
5709LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
5710{
5711 return IgnorePaddingSimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, 1.0f, -5);
5712}
5713
5714LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
5715{
5716 return IgnorePaddingMaxPooling2dSize3TestCommon<float>(workloadFactory);
5717}
5718
5719LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5720{
5721 return IgnorePaddingMaxPooling2dSize3TestCommon<uint8_t>(workloadFactory, 1.0f, -5);
5722}
5723
5724LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
5725{
5726 return IgnorePaddingSimpleAveragePooling2dTestCommon<float>(workloadFactory);
5727}
5728
5729LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
5730{
5731 return IgnorePaddingSimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory);
5732}
5733
5734LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory& workloadFactory)
5735{
5736 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<float>(workloadFactory);
5737}
5738
5739LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
5740 armnn::IWorkloadFactory& workloadFactory)
5741{
5742 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<uint8_t>(workloadFactory);
5743}
5744
5745LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
5746{
5747 return IgnorePaddingAveragePooling2dSize3TestCommon<float>(workloadFactory);
5748}
5749
5750LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5751{
5752 return IgnorePaddingAveragePooling2dSize3TestCommon<uint8_t>(workloadFactory);
5753}
5754
5755LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory)
5756{
5757 return IgnorePaddingSimpleL2Pooling2dTestCommon<float>(workloadFactory);
5758}
5759
5760LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
5761{
5762 return IgnorePaddingSimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory);
5763}
5764
5765LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
5766{
5767 return IgnorePaddingL2Pooling2dSize3TestCommon<float>(workloadFactory);
5768}
5769
5770LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5771{
5772 return IgnorePaddingL2Pooling2dSize3TestCommon<uint8_t>(workloadFactory);
5773}
5774
5775LayerTestResult<float, 4> SimplePermuteFloat32Test(armnn::IWorkloadFactory& workloadFactory)
5776{
5777 return SimplePermuteFloat32TestCommon(workloadFactory);
5778}
5779
5780LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(armnn::IWorkloadFactory& workloadFactory)
5781{
5782 return SimplePermuteUint8TestCommon(workloadFactory);
5783}
surmeh01bceff2f2018-03-29 16:29:27 +01005784
5785LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(armnn::IWorkloadFactory& workloadFactory)
5786{
5787 return PermuteFloat32ValueSet1TestCommon(workloadFactory);
5788}
5789
5790LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(armnn::IWorkloadFactory& workloadFactory)
5791{
5792 return PermuteFloat32ValueSet2TestCommon(workloadFactory);
5793}
5794
5795LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(armnn::IWorkloadFactory& workloadFactory)
5796{
5797 return PermuteFloat32ValueSet3TestCommon(workloadFactory);
narpra011e4c31d2018-09-28 11:07:51 +01005798}
5799
5800namespace
5801{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005802
narpra011e4c31d2018-09-28 11:07:51 +01005803template <typename T, std::size_t InputDim, std::size_t OutputDim>
5804LayerTestResult<T, OutputDim> MeanTestHelper(armnn::IWorkloadFactory& workloadFactory,
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005805 const unsigned int* inputShape,
5806 const std::vector<T>& inputData,
5807 const std::vector<unsigned int>& axis,
5808 bool keepDims,
5809 const unsigned int* outputShape,
5810 const std::vector<T>& outputData,
5811 float scale = 1.0f,
5812 int32_t offset = 0)
narpra011e4c31d2018-09-28 11:07:51 +01005813{
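    // Pick the armnn data type from the template parameter: uint8_t exercises the quantised path, otherwise Float32.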
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005814 auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);
narpra011e4c31d2018-09-28 11:07:51 +01005815
5816 armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
5817 armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);
5818
5819 inputTensorInfo.SetQuantizationScale(scale);
5820 inputTensorInfo.SetQuantizationOffset(offset);
5821
5822 outputTensorInfo.SetQuantizationScale(scale);
5823 outputTensorInfo.SetQuantizationOffset(offset);
5824
5825 auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);
5826
5827 LayerTestResult<T, OutputDim> result(outputTensorInfo);
5828 result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);
5829
5830 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5831 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5832
5833 armnn::MeanQueueDescriptor data;
5834 data.m_Parameters.m_Axis = axis;
5835 data.m_Parameters.m_KeepDims = keepDims;
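    // An empty axis list reduces over every dimension; keepDims retains each reduced dimension as size 1.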
5836 armnn::WorkloadInfo info;
5837 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
5838 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
5839
5840 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);
5841
5842 inputHandle->Allocate();
5843 outputHandle->Allocate();
5844
5845 CopyDataToITensorHandle(inputHandle.get(), input.origin());
5846
narpra011e4c31d2018-09-28 11:07:51 +01005847 workload->Execute();
5848
5849 CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
5850
5851 return result;
5852}
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005853
narpra011e4c31d2018-09-28 11:07:51 +01005854} // anonymous namespace
5855
5856LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(armnn::IWorkloadFactory& workloadFactory)
5857{
5858 const unsigned int inputShape[] = { 3, 2 };
5859 const unsigned int outputShape[] = { 1 };
5860
5861 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
5862 std::vector<uint8_t> output({ 2 });
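    // Mean over all elements: (1 + 1 + 2 + 2 + 3 + 3) / 6 = 2.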
5863
5864 return MeanTestHelper<uint8_t, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
5865}
5866
5867LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
5868{
5869 const unsigned int inputShape[] = { 1, 1, 3, 2 };
5870 const unsigned int outputShape[] = { 1, 1, 2 };
5871
5872 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
5873 std::vector<uint8_t> output({ 2, 2 });
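    // Mean along axis 2 (height): (1 + 2 + 3) / 3 = 2 for each of the two width positions.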
5874
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005875 return MeanTestHelper<uint8_t, 4, 3>(workloadFactory, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01005876}
5877
5878LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
5879{
5880 const unsigned int inputShape[] = { 1, 1, 3, 2 };
5881 const unsigned int outputShape[] = { 1, 1, 1, 2 };
5882
5883 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
5884 std::vector<uint8_t> output({ 2, 2 });
5885
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005886 return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01005887}
5888
5889LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
5890{
5891 const unsigned int inputShape[] = { 2, 3, 1, 2 };
5892 const unsigned int outputShape[] = { 1, 3, 1, 1 };
5893
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005894 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
narpra011e4c31d2018-09-28 11:07:51 +01005895 std::vector<uint8_t> output({ 1, 3, 5 });
5896
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005897 return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01005898}
5899
5900LayerTestResult<uint8_t, 1> MeanVtsUint8Test(armnn::IWorkloadFactory& workloadFactory)
5901{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005902 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01005903 const unsigned int outputShape[] = { 2 };
5904
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005905 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
5906 24 });
5907 std::vector<uint8_t> output({ 12, 13 });
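    // Mean over axes { 0, 1 }: the 12 quantised values feeding each output element average to 12 and 13;
    // input and output share scale 0.8 and offset 5, so no re-quantisation is needed.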
narpra011e4c31d2018-09-28 11:07:51 +01005908
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005909 return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, inputShape, input, { 0, 1 }, false, outputShape,
5910 output, 0.8f, 5);
narpra011e4c31d2018-09-28 11:07:51 +01005911}
5912
5913LayerTestResult<float, 1> MeanFloatSimpleTest(armnn::IWorkloadFactory& workloadFactory)
5914{
5915 const unsigned int inputShape[] = { 3, 2 };
5916 const unsigned int outputShape[] = { 1 };
5917
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005918 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
5919 std::vector<float> output({ 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01005920
5921 return MeanTestHelper<float, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
5922}
5923
5924LayerTestResult<float, 3> MeanFloatSimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
5925{
5926 const unsigned int inputShape[] = { 2, 3, 1, 2 };
5927 const unsigned int outputShape[] = { 3, 1, 2 };
5928
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005929 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
5930 std::vector<float> output({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
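    // Axis 0 averages the two identical batches, so the output equals a single batch.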
narpra011e4c31d2018-09-28 11:07:51 +01005931
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005932 return MeanTestHelper<float, 4, 3>(workloadFactory, inputShape, input, { 0 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01005933}
5934
5935LayerTestResult<float, 4> MeanFloatKeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
5936{
5937 const unsigned int inputShape[] = { 1, 1, 3, 2 };
5938 const unsigned int outputShape[] = { 1, 1, 1, 2 };
5939
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005940 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
5941 std::vector<float> output({ 2.0f, 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01005942
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005943 return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01005944}
5945
5946LayerTestResult<float, 4> MeanFloatMultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
5947{
5948 const unsigned int inputShape[] = { 2, 3, 1, 2 };
5949 const unsigned int outputShape[] = { 1, 3, 1, 1 };
5950
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005951 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
5952 std::vector<float> output({ 1.5f, 3.5f, 5.5f });
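    // Mean over axes { 0, 3 }: for channel 0, (1 + 2 + 1 + 2) / 4 = 1.5, and similarly 3.5 and 5.5 for channels 1 and 2.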
narpra011e4c31d2018-09-28 11:07:51 +01005953
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005954 return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01005955}
5956
5957LayerTestResult<float, 1> MeanVtsFloat1Test(armnn::IWorkloadFactory& workloadFactory)
5958{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005959 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01005960 const unsigned int outputShape[] = { 2 };
5961
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005962 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
5963 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
5964 std::vector<float> output({ 12.0f, 13.0f });
narpra011e4c31d2018-09-28 11:07:51 +01005965
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005966 return MeanTestHelper<float, 3, 1>(workloadFactory, inputShape, input, { 0, 1 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01005967}
5968
5969LayerTestResult<float, 3> MeanVtsFloat2Test(armnn::IWorkloadFactory& workloadFactory)
5970{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005971 const unsigned int inputShape[] = { 4, 3, 2 };
5972 const unsigned int outputShape[] = { 1, 3, 1 };
narpra011e4c31d2018-09-28 11:07:51 +01005973
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005974 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
5975 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
5976 std::vector<float> output({ 10.5f, 12.5f, 14.5f });
narpra011e4c31d2018-09-28 11:07:51 +01005977
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005978 return MeanTestHelper<float, 3, 3>(workloadFactory, inputShape, input, { 0, 2 }, true, outputShape, output);
5979}
5980
5981LayerTestResult<float, 3> MeanVtsFloat3Test(armnn::IWorkloadFactory& workloadFactory)
5982{
5983 const unsigned int inputShape[] = { 1, 2, 2, 1 };
5984 const unsigned int outputShape[] = { 1, 2, 1 };
5985
5986 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f });
5987 std::vector<float> output({ 1.5f, 3.5f });
5988
5989 return MeanTestHelper<float, 4, 3>(workloadFactory, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01005990}
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01005991
5992LayerTestResult<float, 4> AdditionAfterMaxPoolTest(armnn::IWorkloadFactory& workloadFactory)
5993{
5994 // Create the initial 3x3 input tensor:
5995 // 1, 2, 3
5996 // 4, 5, 6
5997 // 7, 8, 9
5998
5999 armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::GetDataType<float>());
6000 armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::GetDataType<float>());
6001
6002 boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
6003 {1, 2, 3,
6004 4, 5, 6,
6005 7, 8, 9
6006 });
6007
6008 std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
6009 workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
6010 std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
6011 workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);
6012
6013 // Apply MaxPool poolSize = 1x1, stride=2x2
6014 // Result =
6015 // 1, 3
6016 // 7, 9
6017 armnn::Pooling2dDescriptor descriptor;
6018 descriptor.m_PoolHeight = 1;
6019 descriptor.m_PoolWidth = 1;
6020 descriptor.m_StrideX = 2;
6021 descriptor.m_StrideY = 2;
6022 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
6023
6024 armnn::Pooling2dQueueDescriptor queueDescriptor;
6025 queueDescriptor.m_Parameters = descriptor;
6026 armnn::WorkloadInfo workloadInfo;
6027 AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
6028 AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
6029
6030 // Create the MaxPool workload.
6031 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
6032
6034 auto shape(GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
6035 boost::multi_array<float, 4> resultMaxPool;
6036 resultMaxPool.resize(shape);
6037
6039 // Create an addition with another tensor of the same size.
6040 // This second tensor is what a Conv2d with a 2x2 kernel of ones and stride 1x1
6041 // would produce from the initial tensor:
6042 // 12, 16
6043 // 24, 28
6044
6045 armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::GetDataType<float>());
6046 armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::GetDataType<float>());
6047
6048 boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
6049 {12, 16,
6050 24, 28,
6051 });
6052
6053 // Expected output tensor after MaxPool and Addition.
6054 LayerTestResult<float,4> addRet(addOutputTensorInfo);
6055 addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
6056 {
6057 13, 19,
6058 31, 37
6059 }));
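    // Each element is the MaxPool result plus the addend: 1 + 12 = 13, 3 + 16 = 19, 7 + 24 = 31, 9 + 28 = 37.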
6060
6061 std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
6062 std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);
6063
6064 armnn::AdditionQueueDescriptor data;
6065 armnn::WorkloadInfo info;
6066
6067 // Add the output of the MaxPool and the new tensor
6068 AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
6069 AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
6070 AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());
6071
6072 std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);
6073
6074 poolingInputHandle->Allocate();
6075 poolingOutputHandle->Allocate();
6076 addInputHandle->Allocate();
6077 addOutputHandle->Allocate();
6078
6079 CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
6080 CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);
6081
6082 // Run the MaxPool first so its output is populated before the addition consumes it,
6083 // then copy the intermediate result out for reference.
6084 workload->Execute();
6085 CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());
6086 addWorkload->Execute();
6087
6088 CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());
6089
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01006090 return addRet;
6091}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00006092
6093LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(armnn::IWorkloadFactory& workloadFactory)
6094{
6095 return SpaceToBatchNdSimpleTest<float>(workloadFactory);
6096}
6097
6098LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(armnn::IWorkloadFactory& workloadFactory)
6099{
6100 return SpaceToBatchNdMultiChannelsTest<float>(workloadFactory);
6101}
6102
6103LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(armnn::IWorkloadFactory& workloadFactory)
6104{
6105 return SpaceToBatchNdMultiBlockTest<float>(workloadFactory);
6106}
6107
6108LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(armnn::IWorkloadFactory& workloadFactory)
6109{
6110 return SpaceToBatchNdPaddingTest<float>(workloadFactory);
6111}
6112
6113LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(armnn::IWorkloadFactory& workloadFactory)
6114{
6115 return SpaceToBatchNdSimpleTest<uint8_t>(workloadFactory);
6116}
6117
6118LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(armnn::IWorkloadFactory& workloadFactory)
6119{
6120 return SpaceToBatchNdMultiChannelsTest<uint8_t>(workloadFactory);
6121}
6122
6123LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(armnn::IWorkloadFactory& workloadFactory)
6124{
6125 return SpaceToBatchNdMultiBlockTest<uint8_t>(workloadFactory);
6126}
6127
6128LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(armnn::IWorkloadFactory& workloadFactory)
6129{
6130 return SpaceToBatchNdPaddingTest<uint8_t>(workloadFactory);
6131}
6132
6133LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(armnn::IWorkloadFactory& workloadFactory)
6134{
6135 return SpaceToBatchNdSimpleNHWCTest<float>(workloadFactory);
6136}
6137
6138LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(armnn::IWorkloadFactory& workloadFactory)
6139{
6140 return SpaceToBatchNdMultiChannelsNHWCTest<float>(workloadFactory);
6141}
6142
6143LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(armnn::IWorkloadFactory& workloadFactory)
6144{
6145 return SpaceToBatchNdMultiBlockNHWCTest<float>(workloadFactory);
6146}
6147
6148LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(armnn::IWorkloadFactory& workloadFactory)
6149{
6150 return SpaceToBatchNdPaddingNHWCTest<float>(workloadFactory);
6151}
6152
6153LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(armnn::IWorkloadFactory& workloadFactory)
6154{
6155 return SpaceToBatchNdSimpleNHWCTest<uint8_t>(workloadFactory);
6156}
6157
6158LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(armnn::IWorkloadFactory& workloadFactory)
6159{
6160 return SpaceToBatchNdMultiChannelsNHWCTest<uint8_t>(workloadFactory);
6161}
6162
6163LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(armnn::IWorkloadFactory& workloadFactory)
6164{
6165 return SpaceToBatchNdMultiBlockNHWCTest<uint8_t>(workloadFactory);
6166}
6167
6168LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(armnn::IWorkloadFactory& workloadFactory)
6169{
6170 return SpaceToBatchNdPaddingNHWCTest<uint8_t>(workloadFactory);
6171}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006172
6173namespace {
6174
6175template<typename T, std::size_t InputDim, std::size_t OutputDim>
6176LayerTestResult<T, OutputDim> BatchToSpaceNdHelper(armnn::IWorkloadFactory &workloadFactory,
6177 const armnn::DataLayout& dataLayout,
6178 const unsigned int *inputShape,
6179 const std::vector<T> &inputData,
6180 const std::vector<unsigned int> &blockShape,
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00006181 const std::vector<std::pair<unsigned int, unsigned int>> &crops,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006182 const unsigned int *outputShape,
6183 const std::vector<T> &outputData,
6184 float scale = 1.0f,
6185 int32_t offset = 0)
6186{
6187 auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);
6188
6189 armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
6190 armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);
6191
6192 inputTensorInfo.SetQuantizationScale(scale);
6193 inputTensorInfo.SetQuantizationOffset(offset);
6194
6195 outputTensorInfo.SetQuantizationScale(scale);
6196 outputTensorInfo.SetQuantizationOffset(offset);
6197
6198 auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);
6199
6200 LayerTestResult<T, OutputDim> result(outputTensorInfo);
6201 result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);
6202
6203 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6204 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6205
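    // blockShape gives the spatial rearrangement factor per dimension; each crops pair removes
    // (begin, end) elements from the corresponding spatial dimension of the output.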
6206 armnn::BatchToSpaceNdQueueDescriptor data;
6207 data.m_Parameters.m_DataLayout = dataLayout;
6208 data.m_Parameters.m_BlockShape = blockShape;
6209 data.m_Parameters.m_Crops = crops;
6210 armnn::WorkloadInfo info;
6211 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
6212 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
6213
6214 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchToSpaceNd(data, info);
6215
6216 inputHandle->Allocate();
6217 outputHandle->Allocate();
6218
6219 CopyDataToITensorHandle(inputHandle.get(), input.origin());
6220
6221 workload->Execute();
6222
6223 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6224
6225 return result;
6226}
6227
6228} // anonymous namespace
6229
6230LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test1(armnn::IWorkloadFactory& workloadFactory)
6231{
6232 const unsigned int inputShape[] = {4, 2, 2, 1};
6233 const unsigned int outputShape[] = {1, 4, 4, 1};
6234
6235 std::vector<float> input
6236 ({
6237 // Batch 0, Height 0, Width (2) x Channel (1)
6238 1.0f, 3.0f,
6239 // Batch 0, Height 1, Width (2) x Channel (1)
6240 9.0f, 11.0f,
6241
6242
6243 // Batch 1, Height 0, Width (2) x Channel (1)
6244 2.0f, 4.0f,
6245 // Batch 1, Height 1, Width (2) x Channel (1)
6246 10.0f, 12.0f,
6247
6248
6249 // Batch 2, Height 0, Width (2) x Channel (1)
6250 5.0f, 7.0f,
6251 // Batch 2, Height 1, Width (2) x Channel (1)
6252 13.0f, 15.0f,
6253
6254 // Batch 3, Height 0, Width (2) x Channel (1)
6255 6.0f, 8.0f,
6256 // Batch 3, Height 1, Width (2) x Channel (1)
6257 14.0f, 16.0f
6258 });
6259
6260 std::vector<float> expectedOutput
6261 ({
6262 1.0f, 2.0f, 3.0f, 4.0f,
6263 5.0f, 6.0f, 7.0f, 8.0f,
6264 9.0f, 10.0f, 11.0f, 12.0f,
6265 13.0f, 14.0f, 15.0f, 16.0f
6266 });
6267
6268 std::vector<unsigned int> blockShape {2, 2};
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00006269 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
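    // A 2x2 block shape folds the four 2x2 batches back into one 4x4 image; zero crops keep the full output.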
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006270
6271 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, armnn::DataLayout::NHWC, inputShape, input, blockShape,
6272 crops, outputShape, expectedOutput);
6273}
6274
6275LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test2(armnn::IWorkloadFactory& workloadFactory)
6276{
6277 const unsigned int inputShape[] = {4, 1, 1, 1};
6278 const unsigned int outputShape[] = {1, 2, 2, 1};
6279
6280 std::vector<float> input
6281 ({
6282 // Batch 0, Height 0, Width (2) x Channel (1)
6283 1.0f, 2.0f, 3.0f, 4.0f
6284 });
6285
6286 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});
6287
6288 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00006289 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006290
6291 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, armnn::DataLayout::NHWC, inputShape, input, blockShape,
6292 crops, outputShape, expectedOutput);
6293}
6294
6295LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test3(armnn::IWorkloadFactory& workloadFactory)
6296{
6297 const unsigned int inputShape[] = {4, 1, 1, 3};
6298 const unsigned int outputShape[] = {1, 2, 2, 3};
6299
6300 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });
6301
6302 std::vector<float> expectedOutput({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });
6303
6304 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00006305 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006306
6307 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, armnn::DataLayout::NHWC, inputShape, input, blockShape,
6308 crops, outputShape, expectedOutput);
6309}
6310
6311LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test1(armnn::IWorkloadFactory &workloadFactory)
6312{
6313 const unsigned int inputShape[] = {4, 3, 1, 1};
6314 const unsigned int outputShape[] = {1, 3, 2, 2};
6315
6316 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });
6317
6318 std::vector<float> expectedOutput
6319 ({
6320 // Batch 0, Channel 0, Height (2) x Width (2)
6321 1.0f, 4.0f,
6322 7.0f, 10.0f,
6323
6324 // Batch 0, Channel 1, Height (2) x Width (2)
6325 2.0f, 5.0f,
6326 8.0f, 11.0f,
6327
6328 // Batch 0, Channel 2, Height (2) x Width (2)
6329 3.0f, 6.0f,
6330 9.0f, 12.0f,
6331 });
6332
6333 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00006334 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
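    // Same 2x2 rearrangement as the NHWC tests, but here the block interleaves into the trailing
    // height and width dimensions of an NCHW tensor.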
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00006335
6336 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, armnn::DataLayout::NCHW, inputShape, input, blockShape,
6337 crops, outputShape, expectedOutput);
6338}