//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "LayerTests.hpp"

#include "test/TensorHelpers.hpp"
#include "TensorCopyUtils.hpp"
#include "Permute.hpp"

#include <boost/test/unit_test.hpp>
#include <boost/assert.hpp>

#include <armnn/LayerSupport.hpp>

#include <backends/CpuTensorHandle.hpp>
#include <backends/WorkloadFactory.hpp>

#include <algorithm>
#include <boost/cast.hpp>

#include "WorkloadTestUtils.hpp"
#include "Conv2dTestImpl.hpp"
#include "BatchNormTestImpl.hpp"
#include "ActivationTestImpl.hpp"
#include "Pooling2dTestImpl.hpp"
#include "ReshapeTestImpl.hpp"
#include "FullyConnectedTestImpl.hpp"
#include "SplitterTestImpl.hpp"
#include "SoftmaxTestImpl.hpp"
#include "NormTestImpl.hpp"
#include "PermuteTestImpl.hpp"
#include "LstmTestImpl.hpp"
#include "ConvertFp16ToFp32TestImpl.hpp"
#include "ConvertFp32ToFp16TestImpl.hpp"

#include <backends/cl/test/ClContextControlFixture.hpp>

// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});

// 2-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias2({0, 2});

// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
template<typename T>
boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
{
    if (biasEnabled)
    {
        armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, armnn::GetDataType<T>());
        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
        return bias;
    }
    else
    {
        return boost::multi_array<T, 1>();
    }
}
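// A minimal usage sketch (hypothetical values; this assumes QuantizedVector
// applies the usual affine quantization q = round(value / qScale) + qOffset to
// quantized types and passes float data through unchanged):
//
//     auto bias     = GetBias2<float>(true, 0.0f, 0);  // tensor holding {0, 2}
//     auto disabled = GetBias2<float>(false, 0.0f, 0); // empty tensor, bias off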

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       float qScale,
                                                       int32_t qOffset,
                                                       bool biasEnabled)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1,  1, 1,
            1, -1, 1,
            1,  1, 1,
            1,  1, 1,
            1,  1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));
    // Expected output is 1 batch of a 2-channel 14x4 image.
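    // The first expected row can be derived by hand: for the first output channel,
    // kernel channel 0 (all 1s apart from the -1 at the centre of its second row)
    // sees the 0.5 plane whose second row is zero, contributing 4 * (3 * 0.5) = 6;
    // kernel channel 1 is all zeros; kernel channel 2 (all 2s) sees the all -1
    // plane, contributing 15 * 2 * -1 = -30; giving 6 - 30 = -24.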
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset);
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       float qScale,
                                                       int32_t qOffset,
                                                       bool biasEnabled)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1,  1, 1,
            1, -1, 1,
            1,  1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset);
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                           float qScale,
                                                           int32_t qOffset,
                                                           bool biasEnabled,
                                                           armnn::DataLayout dataLayout)
{
    // Use a single-batch 1-channel image, 4 wide and 3 high.
    armnn::TensorInfo inputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
    {
        1, 5, 2, 3,
        8, 7, 3, 6,
        3, 3, 9, 1
    });

    // Use a single 1-channel 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
        4, 5, 6,
        0, 0, 0,
        3, 2, 1
    });

    // Expected output is 1 batch of a 1-channel image of the same 4x3 size as the input.
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());

    const std::vector<float> outputData =
    {
        23, 41, 33, 21,
        44, 65, 76, 52,
        82, 85, 79, 42
    };
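    // Same-size output implies 'same' padding; e.g. the top-left value comes from
    // the only kernel taps overlapping valid input there, the bottom kernel row
    // (3, 2, 1): 2 * 8 + 1 * 7 = 23.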

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    return SimpleConvolution2dNhwcTestImpl<T>(workloadFactory,
                                              input,
                                              kernel,
                                              boost::multi_array<T, 1>(),
                                              expectedOutput,
                                              dataLayout,
                                              qScale,
                                              qOffset);
}

LayerTestResult<float, 4> SimpleConvolution2d3x5Test(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled)
{
    return SimpleConvolution2d3x5TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled)
{
    return SimpleConvolution2d3x5TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3Test(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled)
{
    return SimpleConvolution2d3x3TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(armnn::IWorkloadFactory& workloadFactory,
                                                         bool biasEnabled)
{
    return SimpleConvolution2d3x3NhwcTestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled)
{
    return SimpleConvolution2d3x3TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

template<typename T>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11, 21, 31,
            12, 22, 32,
            13, 23, 33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11, -21,
            -12, -22,
        })));

    // Expected output is 1 batch of a 1-channel 6x8 image.
    // Manually calculated like this:
    // [-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0 -12*0  -22*0 ..]
    // [-11*0 -21*0  -12*0 -22*11 ; -11*0  -21*0  -12*11 -22*21 ; -11*0  -21*0  -12*21 -22*31 ; -11*0  -21*0 -12*31 -22*0 ..]
    // [-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
    // [-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
    // [-11*0 -21*13 -12*0 -22*0  ; -11*13 -21*23 -12*0  -22*0  ; -11*23 -21*33 -12*0  -22*0  ; -11*33 -21*0 -12*0  -22*0 ..]
    // [-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0 -12*0  -22*0 ..]
    // [..... ..... ..... .....   ; .....  .....  .....  .....  ; .....  .....  .....  .....  ; .....  ..... .....  ..... ..]
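    // With a 2x2 kernel, unit stride and paddings of left=1, top=2, right=3,
    // bottom=4, the usual convolution arithmetic gives the output size:
    // height = (3 + 2 + 4 - 2) + 1 = 8, width = (3 + 1 + 3 - 2) + 1 = 6.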
344 armnn::TensorInfo outputDesc({1, 1, 8, 6}, armnn::GetDataType<T>());
345 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
346 QuantizedVector<T>(qScale, qOffset, {
347 0, 0, 0, 0, 0, 0,
348 -242, -594, -934, -372, 0, 0,
349 -495, -1190, -1850, -725, 0, 0,
350 -538, -1256, -1916, -748, 0, 0,
351 -273, -626, -946, -363, 0, 0,
352 0, 0, 0, 0, 0, 0,
353 0, 0, 0, 0, 0, 0,
354 0, 0, 0, 0, 0, 0
355 })));
356
357 return SimpleConvolution2dTestImpl<T>(workloadFactory,
358 input,
359 kernel,
360 GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
361 expectedOutput,
362 qScale,
363 qOffset,
telsoa01c577f2c2018-08-31 09:22:23 +0100364 1, // Padding left.
365 2, // Padding top.
366 3, // Padding right.
367 4); // Padding bottom.
telsoa014fcda012018-03-09 14:13:49 +0000368}
369
370template<typename T>
371LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(armnn::IWorkloadFactory& workloadFactory,
372 float qScale,
373 int32_t qOffset)
374{
telsoa01c577f2c2018-08-31 09:22:23 +0100375 // Use a single-batch 1-channel 5x5 image as input.
telsoa014fcda012018-03-09 14:13:49 +0000376 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
377 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
378 QuantizedVector<T>(qScale, qOffset, {
379 11,21,31,41,51,
380 12,22,32,42,52,
381 13,23,33,43,53,
382 14,24,34,44,54,
383 15,25,35,45,55,
384 })));
385
telsoa01c577f2c2018-08-31 09:22:23 +0100386 // Use 1 batch of a 1-channel 4x4 kernel.
telsoa014fcda012018-03-09 14:13:49 +0000387 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
388 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
389 QuantizedVector<T>(qScale, qOffset, {
390 -11,-21,-31,-41,
391 -12,-22,-32,-42,
392 -13,-23,-33,-43,
393 -14,-24,-34,-44,
394 })));
395
telsoa01c577f2c2018-08-31 09:22:23 +0100396 // Expected output is 1 batch of a 1-channel 5x5 image.
telsoa014fcda012018-03-09 14:13:49 +0000397 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -7140, -10580, -13940,  -9300, -5230,
            -9590, -14120, -18520, -12290, -6860,
            -9980, -14560, -18960, -12560, -7000,
            -7518, -10904, -14144,  -9318, -5152,
            -5032,  -7256,  -9376,  -6142, -3368,
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          1, // Padding left.
                                          1, // Padding top.
                                          2, // Padding right.
                                          2); // Padding bottom.
}

template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                 float qScale,
                                                                 int32_t qOffset,
                                                                 bool biasEnabled)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the Python TensorFlow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dAsymmetricTestImpl<T>(workloadFactory,
                                                       input,
                                                       kernel,
                                                       GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                                       expectedOutput,
                                                       qScale,
                                                       qOffset,
                                                       1, // Padding left.
                                                       1, // Padding top.
                                                       2, // Padding right.
                                                       2, // Padding bottom.
                                                       1, // strideX
                                                       1); // strideY
}

template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                           float qScale,
                                                           int32_t qOffset,
                                                           bool biasEnabled)
{
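    // Use the same 2-channel 5x5 image as the NCHW depthwise test above, with the
    // two channels interleaved into NHWC order.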
    armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
             0, 25,
             1, 26,
             2, 27,
             3, 28,
             4, 29,

             5, 30,
             6, 31,
             7, 32,
             8, 33,
             9, 34,

            10, 35,
            11, 36,
            12, 37,
            13, 38,
            14, 39,

            15, 40,
            16, 41,
            17, 42,
            18, 43,
            19, 44,

            20, 45,
            21, 46,
            22, 47,
            23, 48,
            24, 49
        })));

    armnn::TensorInfo kernelTensorInfo({ 1, 4, 4, 2 }, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 16,
            31, 15,
            30, 14,
            29, 13,

            28, 12,
            27, 11,
            26, 10,
            25,  9,

            24,  8,
            23,  7,
            22,  6,
            21,  5,

            20,  4,
            19,  3,
            18,  2,
            17,  1
        })));

    armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1550,
            1580, 2284,
            1850, 2362,
            1530, 1955,
            1117, 1428,

            2140, 2910,
            3108, 4206,
            3500, 4342,
            2842, 3528,
            2042, 2536,

            3580, 3390,
            5068, 4886,
            5460, 5022,
            4342, 4068,
            3062, 2916,

            3618, 3566,
            5072, 5056,
            5390, 5182,
            4248, 4133,
            2971, 2922,

            3074, 3100,
            4282, 4352,
            4510, 4452,
            3533, 3517,
            2457, 2465
        })));

    return DepthwiseConvolution2dNhwcTestImpl<T>(workloadFactory,
                                                 input,
                                                 kernel,
                                                 GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                                 expectedOutput,
                                                 qScale,
                                                 qOffset,
                                                 1, // Padding left.
                                                 1, // Padding top.
                                                 2, // Padding right.
                                                 2, // Padding bottom.
                                                 1, // strideX
                                                 1); // strideY
}

LayerTestResult<float, 4>
Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleConvolution2dAsymmetricPaddingTestCommon<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<float, 4> DepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled)
{
    return DepthwiseConvolution2dTestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(armnn::IWorkloadFactory& workloadFactory,
                                                              bool biasEnabled)
{
    return DepthwiseConvolution2dNhwcTestCommon<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(armnn::IWorkloadFactory& workloadFactory,
                                                              bool biasEnabled)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(armnn::IWorkloadFactory& workloadFactory,
                                                               bool biasEnabled)
{
    return DepthwiseConvolution2dAsymmetricTestCommon<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled)
{
    return DepthwiseConvolution2dTestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                                     bool biasEnabled)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

LayerTestResult<float, 4> Convolution1dTest(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
{
    return Convolution1dTestImpl<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> Convolution1dUint8Test(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
{
    return Convolution1dTestImpl<uint8_t>(workloadFactory, 0.1f, 128, biasEnabled);
}

LayerTestResult<float,4> CompareConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                  armnn::IWorkloadFactory& refWorkloadFactory)
{
    return CompareConvolution2dTestImpl<float>(workloadFactory, refWorkloadFactory);
}

template<typename T>
LayerTestResult<T,4> CompareDepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                       armnn::IWorkloadFactory& refWorkloadFactory)
{
    return CompareDepthwiseConvolution2dTestImpl<T>(workloadFactory, refWorkloadFactory);
}

template LayerTestResult<float, 4> CompareDepthwiseConvolution2dTest<float>(
    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&);
template LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dTest<uint8_t>(
    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&);

LayerTestResult<float,4> SimpleNormalizationAcrossTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float,4> SimpleNormalizationWithinTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationNhwcClNeonTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float,2> SimpleSoftmaxTest(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    return SimpleSoftmaxTestImpl<float>(workloadFactory, beta);
}

LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    return SimpleSoftmaxTestImpl<uint8_t>(workloadFactory, beta);
}

LayerTestResult<float,4> CompareNormalizationTest(armnn::IWorkloadFactory& workloadFactory,
                                                  armnn::IWorkloadFactory& refWorkloadFactory,
                                                  armnn::NormalizationAlgorithmChannel normChannel,
                                                  armnn::NormalizationAlgorithmMethod normMethod)
{
    return CompareNormalizationTestImpl(workloadFactory, refWorkloadFactory, normChannel, normMethod);
}

LayerTestResult<float,2> CompareSoftmaxTest(armnn::IWorkloadFactory& workloadFactory,
                                            armnn::IWorkloadFactory& refWorkloadFactory,
                                            float beta)
{
    return CompareSoftmaxTestImpl<float>(workloadFactory, refWorkloadFactory, beta);
}

LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                   armnn::IWorkloadFactory& refWorkloadFactory,
                                                   float beta)
{
    return CompareSoftmaxTestImpl<uint8_t>(workloadFactory, refWorkloadFactory, beta);
}

std::vector<LayerTestResult<float,3>> SplitterTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SplitterTestCommon<float>(workloadFactory);
}

std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SplitterTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<float, 3> CopyViaSplitterTest(armnn::IWorkloadFactory& workloadFactory)
{
    return CopyViaSplitterTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return CopyViaSplitterTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({ 2, 2 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        { 2., 3., 3., 4. }));

    armnn::TensorInfo outputDesc({ 2, 4 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
         -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
         0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
         -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
         -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
         0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
         -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
         0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
         0.02168f}));
    return LstmLayerFloat32NoCifgWithPeepholeWithProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({2, 2}, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        {2., 3., 3., 4.}));

    armnn::TensorInfo outputDesc({2, 4}, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
         -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float,3> MergerTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float,3> ret(outputTensorInfo);

    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
        {
             1.0f,  2.0f,  3.0f,
             4.0f,  5.0f,  6.0f,
             7.0f,  8.0f,  9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,

            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
        {
             1.0f,  2.0f,  3.0f,
             4.0f,  5.0f,  6.0f,
             7.0f,  8.0f,  9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,
        })
    );

    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
        {
            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; // Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

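    // Where the backend supports sub-tensors, the merger inputs are created as views
    // directly into the output tensor; otherwise standalone input tensors are used.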
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<float,4> AdditionTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 2;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
        {
            0.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            1.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 1.0f, 2.0f,

            0.0f, 0.0f, 1.0f,
            0.2f, 1.0f, 2.0f,
        }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
        {
            1.0f, 2.0f, 1.0f,
            0.0f, 1.0f, 2.0f,

            1.0f, 2.0f, -2.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 0.0f, -3.0f,

            0.0f, 0.0f, 1.0f,
            0.7f, 1.0f, 5.0f,
        }));

    LayerTestResult<float,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 4.0f, 2.0f,
            0.2f, 2.0f, 4.0f,

            2.0f, 4.0f, -1.0f,
            0.4f, 2.0f, 4.0f,

            0.0f, 4.0f, 2.0f,
            8.4f, 1.0f, -1.0f,

            0.0f, 0.0f, 2.0f,
            0.9f, 2.0f, 7.0f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template <typename T>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                float qScale,
                                                int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
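    // The {1, 3, 2, 1} and {1, 1, 2, 3} inputs broadcast against each other to the
    // {1, 3, 2, 3} output shape: each dimension pair must either match or have one
    // side equal to 1.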

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template <typename T>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                        float qScale,
                                                        int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f, 1.0f, 2.0f,
            3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f,
            9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
            6.5f, 7.5f, 8.5f,
            9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<float, 4> AdditionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcastTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcastTestImpl<uint8_t>(workloadFactory, 2.f, 0);
}

LayerTestResult<float, 4> AdditionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcast1ElementTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcast1ElementTestImpl<uint8_t>(workloadFactory, 0.1333333f, 128);
}

LayerTestResult<float,4> CompareAdditionTest(armnn::IWorkloadFactory& workloadFactory,
                                             armnn::IWorkloadFactory& refWorkloadFactory)
{
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();
    refWorkloadFactory.Finalize();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}

namespace {
template <typename T>
LayerTestResult<T, 4> DivisionTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                         const unsigned int shape0[4],
                                         const std::vector<T>& values0,
                                         float scale0,
                                         int32_t offset0,
                                         const unsigned int shape1[4],
                                         const std::vector<T>& values1,
                                         float scale1,
                                         int32_t offset1,
                                         const unsigned int outShape[4],
                                         const std::vector<T>& outValues,
                                         float outScale,
                                         int32_t outOffset)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DivisionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<float,4> DivisionByZeroTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<float> input0({
         1.f,  1.f,  1.f,  1.f, 0.f, 0.f, 0.f, 0.f,
        -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });

    std::vector<float> input1({
        0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
        0.f, 0.f, -0.f, -0.f, 5.f, 5.f,  5.f,  5.f });

    std::vector<float> output({
         INFINITY,  INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
        -INFINITY, -INFINITY,  INFINITY,  INFINITY, 1, 1, 1, 1 });
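    // IEEE 754 semantics: dividing a non-zero value by (signed) zero yields an
    // infinity whose sign follows the operands' signs, while 0/0 yields NaN.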
1336
David Beck5cd01f32018-09-12 16:00:08 +01001337 return DivisionTestHelper<float>(workloadFactory,
1338 shape, input0, 1.0f, 0,
1339 shape, input1, 1.0f, 0,
1340 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001341}
1342
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001343LayerTestResult<float,4> DivisionTest(armnn::IWorkloadFactory& workloadFactory)
1344{
1345 const unsigned int width = 2;
1346 const unsigned int height = 2;
1347 const unsigned int channelCount = 2;
1348 const unsigned int batchSize = 2;
1349
1350 unsigned int shape[] = { batchSize, channelCount, height, width };
1351
1352 std::vector<float> input0({
1353 2, 2, 2, 2, 3, 3, 3, 3,
1354 4, 4, 4, 4, 5, 5, 5, 5 });
1355
1356 std::vector<float> input1({
1357 1, 1, 1, 1, 2, 2, 2, 2,
1358 4, 4, 4, 4, 4, 4, 4, 4 });
1359
1360 std::vector<float> output({
1361 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
1362 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
1363
David Beck5cd01f32018-09-12 16:00:08 +01001364
1365 return DivisionTestHelper<float>(workloadFactory,
1366 shape, input0, 1.0f, 0,
1367 shape, input1, 1.0f, 0,
1368 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001369}
1370
1371LayerTestResult<float, 4> DivisionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
1372{
1373 unsigned int shape0[] = { 1, 2, 2, 2 };
1374 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1375
1376 unsigned int shape1[] = { 1, 1, 1, 1 };
1377 std::vector<float> input1({ 2 });
1378
1379 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1380
David Beck5cd01f32018-09-12 16:00:08 +01001381
1382 return DivisionTestHelper<float>(workloadFactory,
1383 shape0, input0, 1.0f, 0,
1384 shape1, input1, 1.0f, 0,
1385 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001386}
1387
1388LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
1389{
1390 unsigned int shape0[] = { 1, 3, 3, 2 };
1391 std::vector<float> input0({
1392 1, 4, 3, 8, 5, 12,
1393 7, 16, 9, 20, 11, 24,
1394 13, 28, 15, 32, 17, 36});
1395
1396 unsigned int shape1[] = { 1, 1, 1, 2 };
1397 std::vector<float> input1({ 1, 2 });
1398
1399 std::vector<float> output({
1400 1, 2, 3, 4, 5, 6,
1401 7, 8, 9, 10, 11, 12,
1402 13, 14, 15, 16, 17, 18});
1403
David Beck5cd01f32018-09-12 16:00:08 +01001404 return DivisionTestHelper<float>(workloadFactory,
1405 shape0, input0, 1.0f, 0,
1406 shape1, input1, 1.0f, 0,
1407 shape0, output, 1.0f, 0);
1408}
1409
1410
1411LayerTestResult<uint8_t,4> DivisionUint8Test(armnn::IWorkloadFactory& workloadFactory)
1412{
1413 const unsigned int width = 2;
1414 const unsigned int height = 2;
1415 const unsigned int channelCount = 2;
1416 const unsigned int batchSize = 2;
1417
1418 unsigned int shape[] = { batchSize, channelCount, height, width };
1419
1420 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
1421 4, 4, 4, 4, 5, 5, 5, 5 });
1422
1423 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
1424 4, 4, 4, 4, 4, 4, 4, 4 });
1425
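    // The inputs use scale 1.0 while the helper call below passes 0.25 as the output
    // scale, so the real quotients {2, 1.5, 1, 1.25} are stored quantized as
    // {2/0.25, 1.5/0.25, 1/0.25, 1.25/0.25} = {8, 6, 4, 5} in the expected output.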
1426 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
1427 4, 4, 4, 4, 5, 5, 5, 5});
1428
1429
1430 return DivisionTestHelper<uint8_t>(workloadFactory,
1431 shape, input0, 1.0f, 0,
1432 shape, input1, 1.0f, 0,
1433 shape, output, 0.25f, 0);
1434}
1435
1436LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
1437{
1438 unsigned int shape0[] = { 1, 2, 2, 2 };
1439 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1440
1441 unsigned int shape1[] = { 1, 1, 1, 1 };
1442 std::vector<uint8_t> input1({ 2 });
1443
1444 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1445
1446 return DivisionTestHelper<uint8_t>(workloadFactory,
1447 shape0, input0, 1.0f, 0,
1448 shape1, input1, 1.0f, 0,
1449 shape0, output, 1.0f, 0);
1450}
1451
1452LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
1453{
1454 unsigned int shape0[] = { 1, 3, 3, 2 };
1455 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
1456 7, 16, 9, 20, 11, 24,
1457 13, 28, 15, 32, 17, 36});
1458
1459 unsigned int shape1[] = { 1, 1, 1, 2 };
1460 std::vector<uint8_t> input1({ 1, 2 });
1461
1462 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
1463 7, 8, 9, 10, 11, 12,
1464 13, 14, 15, 16, 17, 18});
1465
1466 return DivisionTestHelper<uint8_t>(workloadFactory,
1467 shape0, input0, 1.0f, 0,
1468 shape1, input1, 1.0f, 0,
1469 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001470}
1471
1472namespace {
surmeh01bceff2f2018-03-29 16:29:27 +01001473LayerTestResult<float,4> MultiplicationTestHelper(armnn::IWorkloadFactory& workloadFactory,
1474 const unsigned int shape0[4],
1475 const std::vector<float> & values0,
1476 const unsigned int shape1[4],
1477 const std::vector<float> & values1,
1478 const unsigned int outShape[4],
1479 const std::vector<float> & outValues)
telsoa014fcda012018-03-09 14:13:49 +00001480{
surmeh01bceff2f2018-03-29 16:29:27 +01001481 const size_t dimensionCount = 4;
1482 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
1483 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
1484 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
telsoa014fcda012018-03-09 14:13:49 +00001485
surmeh01bceff2f2018-03-29 16:29:27 +01001486 auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
1487 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00001488
1489 LayerTestResult<float,4> ret(outputTensorInfo);
1490
1491 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1492 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1493 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1494
1495 armnn::MultiplicationQueueDescriptor data;
1496 armnn::WorkloadInfo info;
1497 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1498 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1499 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1500
1501 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
1502
1503 inputHandle0->Allocate();
1504 inputHandle1->Allocate();
1505 outputHandle->Allocate();
1506
1507 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1508 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1509
surmeh013537c2c2018-05-18 16:31:43 +01001510 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001511 workload->Execute();
1512
1513 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1514
surmeh01bceff2f2018-03-29 16:29:27 +01001515 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00001516 return ret;
1517}
surmeh01bceff2f2018-03-29 16:29:27 +01001518} // anonymous namespace
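
// MultiplicationTestHelper above follows the workload-test pattern used throughout this
// file: create tensor handles from the factory, register them in a queue descriptor and
// WorkloadInfo, create the workload, allocate the handles and copy the input data in,
// Finalize() the factory, Execute() the workload, then copy the result back out for
// comparison against the expected values.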
1519
1520
1521LayerTestResult<float,4> MultiplicationTest(armnn::IWorkloadFactory& workloadFactory)
1522{
1523 const unsigned int width = 2;
1524 const unsigned int height = 2;
1525 const unsigned int channelCount = 2;
1526 const unsigned int batchSize = 2;
1527
1528 unsigned int shape[] = { batchSize, channelCount, height, width };
1529
1530 std::vector<float> input0({
1531 1, 1, 1, 1, 2, 2, 2, 2,
1532 3, 3, 3, 3, 4, 4, 4, 4 });
1533
1534 std::vector<float> input1({
1535 2, 2, 2, 2, 3, 3, 3, 3,
1536 4, 4, 4, 4, 5, 5, 5, 5 });
1537
1538 std::vector<float> output({
1539 2, 2, 2, 2, 6, 6, 6, 6,
1540 12, 12, 12, 12, 20, 20, 20, 20 });
1541
1542 return MultiplicationTestHelper(workloadFactory,
1543 shape,
1544 input0,
1545 shape,
1546 input1,
1547 shape,
1548 output);
1549}
1550
1551LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
1552{
1553 unsigned int shape0[] = { 1, 2, 2, 2 };
1554 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
1555
1556 unsigned int shape1[] = { 1, 1, 1, 1 };
1557 std::vector<float> input1({ 2 });
1558
1559 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
1560
1561 return MultiplicationTestHelper(workloadFactory,
1562 shape0,
1563 input0,
1564 shape1,
1565 input1,
1566 shape0,
1567 output);
1568}
1569
1570LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
1571{
1572 unsigned int shape0[] = { 1, 3, 3, 2 };
1573 std::vector<float> input0({
1574 1, 2, 3, 4, 5, 6,
1575 7, 8, 9, 10, 11, 12,
1576 13, 14, 15, 16, 17, 18});
1577
1578 unsigned int shape1[] = { 1, 1, 1, 2 };
1579 std::vector<float> input1({ 1, 2 });
1580
1581 std::vector<float> output({
1582 1, 4, 3, 8, 5, 12,
1583 7, 16, 9, 20, 11, 24,
1584 13, 28, 15, 32, 17, 36});
1585
1586 return MultiplicationTestHelper(workloadFactory,
1587 shape0,
1588 input0,
1589 shape1,
1590 input1,
1591 shape0,
1592 output);
1593}
telsoa014fcda012018-03-09 14:13:49 +00001594
1595LayerTestResult<float,4> CompareMultiplicationTest(armnn::IWorkloadFactory& workloadFactory,
1596 armnn::IWorkloadFactory& refWorkloadFactory)
1597{
1598 const unsigned int width = 16;
1599 const unsigned int height = 32;
1600 const unsigned int channelCount = 2;
1601 const unsigned int batchSize = 5;
1602
1603 armnn::TensorInfo inputTensorInfo0;
1604 armnn::TensorInfo inputTensorInfo1;
1605 armnn::TensorInfo outputTensorInfo;
1606
1607 constexpr unsigned int shape[] = { batchSize, channelCount, height, width };
1608
1609 inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1610 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1611 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1612
1613 LayerTestResult<float,4> comparisonResult(outputTensorInfo);
1614
1615 auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
1616 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);
1617
1618 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1619 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1620 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1621
1622 std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
1623 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
1624 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1625
1626 armnn::MultiplicationQueueDescriptor data;
1627 armnn::WorkloadInfo info;
1628 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1629 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1630 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1631
1632 armnn::MultiplicationQueueDescriptor refData = data;
1633 armnn::WorkloadInfo refInfo = info;
1634 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
1635 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
1636 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1637
1638 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
1639 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);
1640
1641 inputHandle0->Allocate();
1642 inputHandle1->Allocate();
1643 outputHandle->Allocate();
1644 inputHandle0Ref->Allocate();
1645 inputHandle1Ref->Allocate();
1646 outputHandleRef->Allocate();
1647
1648 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1649 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1650 CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
1651 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
1652
surmeh013537c2c2018-05-18 16:31:43 +01001653 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001654 workload->Execute();
surmeh013537c2c2018-05-18 16:31:43 +01001655 refWorkloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001656 workloadRef->Execute();
1657
1658 CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
1659 CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
1660
1661 return comparisonResult;
1662}
1663
1664LayerTestResult<float,4> CompareBatchNormTest(armnn::IWorkloadFactory& workloadFactory,
1665 armnn::IWorkloadFactory& refWorkloadFactory)
1666{
1667 const unsigned int width = 2;
1668 const unsigned int height = 3;
1669 const unsigned int channels = 5;
1670 const unsigned int batchSize = 3;
1671
1672 armnn::TensorInfo inputTensorInfo;
1673 armnn::TensorInfo outputTensorInfo;
1674 armnn::TensorInfo tensorInfo;
1675
1676 constexpr unsigned int shape[] = {batchSize, channels, height, width};
1677 constexpr unsigned int tensorShape[] = {channels};
1678
1679 inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1680 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1681 tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);
1682
1683 auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);
1684
1685 auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
1686 auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
1687 auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
1688 auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);
1689
1690 LayerTestResult<float,4> ret(outputTensorInfo);
1691
1692 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
1693 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1694
1695 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
1696 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1697
1698 armnn::BatchNormalizationQueueDescriptor data;
1699 armnn::WorkloadInfo info;
1700 armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
1701 armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
1702 armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
1703 armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
1704
1705 AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
1706 AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
1707 AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
1708 AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
1709
1710 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1711 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1712 data.m_Mean = &meanTensor;
1713 data.m_Variance = &varianceTensor;
1714 data.m_Beta = &betaTensor;
1715 data.m_Gamma = &gammaTensor;
1716 data.m_Parameters.m_Eps = 0.01f;
1717
1718 armnn::BatchNormalizationQueueDescriptor refData = data;
1719 armnn::WorkloadInfo refInfo = info;
1720 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
1721 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1722
1723 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
1724 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);
1725
1726 inputHandle->Allocate();
1727 outputHandle->Allocate();
1728 inputHandleRef->Allocate();
1729 outputHandleRef->Allocate();
1730
1731 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
1732 CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
1733
surmeh013537c2c2018-05-18 16:31:43 +01001734 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001735 workload->Execute();
surmeh013537c2c2018-05-18 16:31:43 +01001736 refWorkloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001737 workloadRef->Execute();
1738
1739 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1740 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
1741
1742 return ret;
1743}
1744
surmeh013537c2c2018-05-18 16:31:43 +01001745template<typename T>
1746void PermuteTensorData(
1747 armnn::IWorkloadFactory& workloadFactory,
1748 const armnn::PermutationVector& mappings,
1749 armnn::TensorInfo & inputTensorInfo,
1750 const T * inputData,
1751 std::vector<T>& outputData)
telsoa014fcda012018-03-09 14:13:49 +00001752{
surmeh013537c2c2018-05-18 16:31:43 +01001753 BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
1754 if (inputData == nullptr)
1755 {
1756 // Nullptr is an error in the test. By returning without doing the permutation,
1757 // we expect the caller to fail the test. It still makes sense to report this as
1758 // an assert for Debug builds.
1759 return;
1760 }
telsoa014fcda012018-03-09 14:13:49 +00001761
surmeh013537c2c2018-05-18 16:31:43 +01001762 armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
1763
1764 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
1765 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1766
1767 armnn::PermuteQueueDescriptor queueDescriptor;
1768 queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
1769 armnn::WorkloadInfo workloadInfo;
1770 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
1771 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
1772
1773 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
1774
1775 inputHandle->Allocate();
1776 outputHandle->Allocate();
1777
1778 CopyDataToITensorHandle(inputHandle.get(), inputData);
1779
1780 workload->Execute();
1781
1782 outputData.resize(outputTensorInfo.GetNumElements());
1783 CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
1784 inputTensorInfo = outputTensorInfo;
1785}
1786
1787armnn::OriginsDescriptor CreateMergerDescriptorForConcatenation(
1788 const std::vector<armnn::TensorInfo> & inputTensorInfos,
1789 unsigned int concatDim)
1790{
telsoa014fcda012018-03-09 14:13:49 +00001791 std::vector<armnn::TensorShape> shapes;
1792 shapes.reserve(inputTensorInfos.size());
1793 for (const armnn::TensorInfo& it: inputTensorInfos)
1794 {
1795 shapes.push_back(it.GetShape());
1796 }
surmeh013537c2c2018-05-18 16:31:43 +01001797
1798 return armnn::CreateMergerDescriptorForConcatenation(shapes.begin(),
1799 shapes.end(),
1800 concatDim);
1801}
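
// As a hedged example of what the descriptor encodes: concatenating shapes { 2, 3 } and
// { 3, 3 } along dimension 0 should yield view origins (0, 0) and (2, 0) in the { 5, 3 }
// output, i.e. each view starts where the previous one ends along the concat dimension.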
1802
1803//
1804// Concatenation is only supported for the N and C dimensions in NCHW. For tensors with
telsoa01c577f2c2018-08-31 09:22:23 +01001805// fewer than 4 dimensions we need to make sure that the concat dimension is at least
surmeh013537c2c2018-05-18 16:31:43 +01001806// the 3rd slowest iterating one.
1807//
1808
1809bool NeedPermuteForConcat(
1810 const std::vector<armnn::TensorInfo> & inputTensorInfos,
1811 unsigned int concatDim)
1812{
1813 // See note above. Additionally we expect the input shapes to have the
1814 // same number of dimensions.
1815 unsigned int nDimensions = 0;
1816
telsoa01c577f2c2018-08-31 09:22:23 +01001817 // Determine the number of dimensions, and sanity-check them
1818 // against test implementation issues.
surmeh013537c2c2018-05-18 16:31:43 +01001819 for (auto && tensorInfo : inputTensorInfos)
1820 {
1821 if (!nDimensions)
1822 {
1823 nDimensions = tensorInfo.GetShape().GetNumDimensions();
1824 }
1825 else
1826 {
1827 BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
1828 "Input shapes must have the same number of dimensions");
1829 }
1830 }
1831
1832 return (nDimensions - concatDim) < 3;
1833}
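
// Worked examples of the rule above, assuming 4D NCHW inputs in `infos`:
//
//   NeedPermuteForConcat(infos, 0);  // false: 4 - 0 = 4, not < 3 (concat along N)
//   NeedPermuteForConcat(infos, 1);  // false: 4 - 1 = 3, not < 3 (concat along C)
//   NeedPermuteForConcat(infos, 2);  // true:  4 - 2 = 2 <  3     (concat along H)
//
// For a 2D input even dimension 0 needs a permute, since 2 - 0 = 2 < 3.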
1834
1835armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
1836{
1837 unsigned int numDims = inputShape.GetNumDimensions();
1838 if (numDims >= 3)
1839 {
1840 // Nothing to do if the inputShape has at least 3 dimensions.
1841 return inputShape;
1842 }
1843
1844 std::vector<unsigned int> newDims(size_t(3), 1u);
1845 unsigned int expandedBy = 3 - numDims;
1846 for (unsigned int i=0; i<numDims; ++i)
1847 {
1848 newDims[expandedBy+i] = inputShape[i];
1849 }
1850 return armnn::TensorShape(3u, &newDims[0]);
1851}
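
// For example, this expands { 3 } to { 1, 1, 3 } and { 2, 3 } to { 1, 2, 3 }: the
// original dimensions keep their order and the new leading dimensions are filled with 1s.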
1852
1853void Generate3dPermuteVectorForConcat(
1854 unsigned int numDimensions,
1855 unsigned int & concatDim,
1856 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
1857{
1858 BOOST_ASSERT_MSG(numDimensions <= 3,
1859 "Only dimensions 1,2 and 3 are supported by this helper");
1860
1861 unsigned int expandedBy = 3 - numDimensions;
1862 unsigned int expandedConcatAxis = concatDim + expandedBy;
1863
1864 if (expandedConcatAxis == 2)
1865 {
1866 concatDim = 0;
1867 armnn::PermutationVector forwardPermutation({1, 2, 0});
1868 armnn::PermutationVector reversePermutation({2, 0, 1});
1869 permutations = std::make_pair(forwardPermutation, reversePermutation);
1870 }
1871 else if (expandedConcatAxis == 1)
1872 {
1873 concatDim = 0;
1874 armnn::PermutationVector forwardPermutation({2, 0, 1});
1875 armnn::PermutationVector reversePermutation({1, 2, 0});
1876 permutations = std::make_pair(forwardPermutation, reversePermutation);
1877 }
1878 else
1879 {
1880 BOOST_ASSERT(expandedConcatAxis == 0);
1881 concatDim = 0;
1882 }
1883}
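
// A worked example: for a 1D tensor (numDimensions == 1) concatenated along its only
// dimension (concatDim == 0), the expanded 3D concat axis is 0 + (3 - 1) = 2, so the
// forward permutation {1, 2, 0} moves that axis to position 0 (hence concatDim becomes 0)
// and the reverse permutation {2, 0, 1} undoes the move afterwards.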
1884
1885//
1886// Permute the input tensors so we can do a supported concatenation.
1887// Tensors with fewer than 3 dimensions are treated as 3d by adding dummy
1888// dimensions of size 1 at the front. Finally, this function computes what the
1889// output shape of the permuted, concatenated tensor is going to be.
1890//
1891template <typename T>
1892void PermuteInputsForConcat(
1893 armnn::IWorkloadFactory& workloadFactory,
1894 std::vector<armnn::TensorInfo> & inputTensorInfos,
1895 std::vector<T *> & inputData,
1896 std::vector<std::vector<T>> & inputDataStorage,
1897 armnn::PermutationVector & permuteVector,
1898 unsigned int & concatDim,
1899 armnn::TensorInfo & outputTensorInfo)
1900{
1901 BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
1902 "Expecting more than one tensor to be concatenated here");
1903
1904 unsigned int numDims = 0;
1905 unsigned int nthInput = 0;
1906 const armnn::PermutationVector identity({0, 1, 2});
1907
1908 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
1909 std::make_pair(identity, identity);
1910
1911 inputDataStorage.resize(inputData.size());
1912
1913 for (auto && tensorInfo : inputTensorInfos)
1914 {
1915 if (numDims == 0)
1916 {
1917 numDims = tensorInfo.GetShape().GetNumDimensions();
1918 Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);
telsoa01c577f2c2018-08-31 09:22:23 +01001919 // Store the reverse permutation.
surmeh013537c2c2018-05-18 16:31:43 +01001920 permuteVector = permutations.second;
1921 BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
1922 "Test logic error, we don't need permutation, so we shouldn't arrive here");
1923 }
1924 else
1925 {
1926 BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
1927 "All inputs must have the same number of dimensions");
1928 }
1929
1930 armnn::TensorInfo newTensorInfo = tensorInfo;
1931 newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));
1932
1933 PermuteTensorData<T>(workloadFactory,
1934 permutations.first,
1935 newTensorInfo,
1936 inputData[nthInput],
1937 inputDataStorage[nthInput]);
1938
1939 inputData[nthInput] = inputDataStorage[nthInput].data();
1940 inputTensorInfos[nthInput] = newTensorInfo;
1941
1942 ++nthInput;
1943 }
1944
1945 outputTensorInfo.SetShape(
1946 armnnUtils::Permuted(
1947 ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
1948 permutations.first));
1949}
1950
1951
1952//
1953// This is the counterpart of PermuteInputsForConcat(...): it permutes the
telsoa01c577f2c2018-08-31 09:22:23 +01001954// output of the concatenation back so we can check it against the expected
surmeh013537c2c2018-05-18 16:31:43 +01001955// output.
1956//
1957template <typename T>
1958void PermuteOutputForConcat(
1959 armnn::IWorkloadFactory& workloadFactory,
1960 const armnn::TensorInfo & tensorInfo,
1961 const armnn::PermutationVector & permuteVector,
1962 std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
1963 T * data)
1964{
1965 BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
1966 if (data == nullptr)
1967 {
1968 // Nullptr is an error in the test. By returning without doing the permutation,
1969 // we expect the caller to fail the test. It still makes sense to report this as
1970 // an assert for Debug builds.
1971 return;
1972 }
1973
1974 armnn::TensorInfo resultTensorInfo = tensorInfo;
1975 std::vector<T> inputData(tensorInfo.GetNumElements());
1976 std::vector<T> outputData;
1977
1978 CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
1979
1980 PermuteTensorData<T>(workloadFactory,
1981 permuteVector,
1982 resultTensorInfo,
1983 &inputData[0],
1984 outputData);
1985
1986 ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
1987}
1988
1989template <typename T>
1990void Concatenate(armnn::IWorkloadFactory& workloadFactory,
1991 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
1992 std::initializer_list<T *> inputsOrig,
1993 const armnn::TensorInfo& outputTensorInfoOrig,
1994 T * output,
1995 unsigned int concatDim)
1996{
1997 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
1998 if (output == nullptr)
1999 {
2000 // Nullptr is an error in the test. By returning without doing the concatenation,
2001 // we expect the caller to fail the test. It still makes sense to report this as
2002 // an assert for Debug builds.
2003 return;
2004 }
2005
2006 armnn::MergerQueueDescriptor queueDescriptor;
2007
telsoa01c577f2c2018-08-31 09:22:23 +01002008 // Save copies of the parameters, as we might need to change them.
surmeh013537c2c2018-05-18 16:31:43 +01002009 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
2010 std::vector<T *> inputs = inputsOrig;
2011 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
2012
2013 armnn::PermutationVector permuteVector{0, 1, 2};
2014
telsoa01c577f2c2018-08-31 09:22:23 +01002015 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01002016 std::vector<std::vector<T>> tmpInputDataStorage;
2017
2018 const size_t inputCount = inputTensorInfos.size();
2019
2020 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
2021
2022 if (needPermuteForConcat)
2023 {
2024 //
2025 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01002026 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01002027 //
2028 PermuteInputsForConcat<T>(workloadFactory,
2029 inputTensorInfos,
2030 inputs,
2031 tmpInputDataStorage,
2032 permuteVector,
2033 concatDim,
2034 outputTensorInfo);
2035 }
2036
2037 armnn::OriginsDescriptor viewsDescriptor = CreateMergerDescriptorForConcatenation(inputTensorInfos, concatDim);
telsoa014fcda012018-03-09 14:13:49 +00002038
2039 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
2040 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
2041 {
2042 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
2043 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
2044 }
2045
telsoa014fcda012018-03-09 14:13:49 +00002046 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2047
2048 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
2049 inputHandles.reserve(inputCount);
2050
2051 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2052 for (unsigned int i = 0; i < inputCount; ++i)
2053 {
surmeh013537c2c2018-05-18 16:31:43 +01002054 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
telsoa014fcda012018-03-09 14:13:49 +00002055
2056 std::unique_ptr<armnn::ITensorHandle> inputHandle = subTensorsSupported ?
2057 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo.GetShape(),
2058 queueDescriptor.m_ViewOrigins[i].m_Origin.data())
2059 : workloadFactory.CreateTensorHandle(inputTensorInfo);
2060
2061 inputHandles.emplace_back(std::move(inputHandle));
2062 }
2063
2064 armnn::WorkloadInfo workloadInfo;
2065
2066 for (unsigned int i = 0; i < inputCount; ++i)
2067 {
surmeh013537c2c2018-05-18 16:31:43 +01002068 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00002069 }
2070
2071 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
2072
2073 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(queueDescriptor, workloadInfo);
2074
2075 for (auto& inputHandle : inputHandles)
2076 {
2077 inputHandle->Allocate();
2078 }
2079
2080 outputHandle->Allocate();
2081
2082 unsigned int nextInputId = 0;
2083 for (auto& inputHandle : inputHandles)
2084 {
surmeh013537c2c2018-05-18 16:31:43 +01002085 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
2086 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00002087 }
2088
surmeh013537c2c2018-05-18 16:31:43 +01002089 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00002090 workload->Execute();
2091
surmeh013537c2c2018-05-18 16:31:43 +01002092 if (needPermuteForConcat)
2093 {
2094 PermuteOutputForConcat<T>(workloadFactory,
2095 outputTensorInfo,
2096 permuteVector,
2097 std::move(outputHandle),
2098 output);
2099 }
2100 else
2101 {
2102 CopyDataFromITensorHandle(output, outputHandle.get());
2103 }
telsoa014fcda012018-03-09 14:13:49 +00002104}
2105
2106template <typename T>
2107LayerTestResult<T, 1> Concatenation1dTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
2108{
2109 armnn::TensorInfo inputTensorInfo({ 3 }, armnn::GetDataType<T>());
2110
2111 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
2112 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
2113 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
2114
2115 armnn::TensorInfo outputTensorInfo({ 9 }, armnn::GetDataType<T>());
2116
2117 LayerTestResult<T, 1> result(outputTensorInfo);
2118
2119 std::vector<T> output;
2120 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002121 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002122 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2123 { input0.data(), input1.data(), input2.data() },
2124 outputTensorInfo,
2125 output.data(),
2126 0);
2127
2128 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
2129 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2130 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
2131 }));
2132
2133 return result;
2134}
2135
2136LayerTestResult<float, 1> Concatenation1dTest(armnn::IWorkloadFactory& workloadFactory)
2137{
2138 return Concatenation1dTestImpl<float>(workloadFactory, 0.0f, 0);
2139}
2140
2141template <typename T>
2142LayerTestResult<T, 2> Concatenation2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
2143 const armnn::TensorInfo& outputTensorInfo,
2144 unsigned int dimension,
2145 const float qScale,
2146 const int32_t qOffset)
2147{
2148 armnn::TensorInfo inputTensorInfo({ 2, 3 }, armnn::GetDataType<T>());
2149
2150 auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2151 // Batch 0
2152 1.0f, 2.0f, 3.0f,
2153
2154 // Batch 1
2155 10.0f, 11.0f, 12.0f,
2156 }));
2157
2158 auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2159 // Batch 0
2160 4.0f, 5.0f, 6.0f,
2161
2162 // Batch 1
2163 13.0f, 14.0f, 15.0f,
2164 }));
2165
2166 auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2167 // Batch 0
2168 7.0f, 8.0f, 9.0f,
2169
2170 // Batch 1
2171 16.0f, 17.0f, 18.0f,
2172 }));
2173
2174 LayerTestResult<T, 2> result(outputTensorInfo);
2175
2176 std::vector<T> output;
2177 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002178 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002179 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2180 { input0.data(), input1.data(), input2.data() },
2181 outputTensorInfo,
2182 output.data(),
2183 dimension);
2184
2185 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
2186 return result;
2187}
2188
2189template <typename T>
2190LayerTestResult<T, 2> Concatenation2dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory,
2191 float qScale, int32_t qOffset)
2192{
2193 armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
2194
2195 LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 0, qScale, qOffset);
2196 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2197 // Batch 0
2198 1.0f, 2.0f, 3.0f,
2199
2200 // Batch 1
2201 10.0f, 11.0f, 12.0f,
2202
2203 // Batch 2
2204 4.0f, 5.0f, 6.0f,
2205
2206 // Batch 3
2207 13.0f, 14.0f, 15.0f,
2208
2209 // Batch 4
2210 7.0f, 8.0f, 9.0f,
2211
2212 // Batch 5
2213 16.0f, 17.0f, 18.0f,
2214 }));
2215
2216 return result;
2217}
2218
2219LayerTestResult<float, 2> Concatenation2dDim0Test(armnn::IWorkloadFactory& workloadFactory)
2220{
2221 return Concatenation2dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
2222}
2223
2224template <typename T>
2225LayerTestResult<T, 2> Concatenation2dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
2226 float qScale, int32_t qOffset)
2227{
2228 armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
2229
2230 LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
2231 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2232 // Batch 0
2233 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
2234
2235 // Batch 1
2236 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
2237 }));
2238
2239 return result;
2240}
2241
2242LayerTestResult<float, 2> Concatenation2dDim1Test(armnn::IWorkloadFactory& workloadFactory)
2243{
2244 return Concatenation2dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
2245}
2246
2247template <typename T>
2248LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2249 int32_t qOffset)
2250{
2251 armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
2252 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2253 // Batch 0
2254 1.0f, 2.0f, 3.0f,
2255
2256 // Batch 1
2257 10.0f, 11.0f, 12.0f,
2258 }));
2259
2260 armnn::TensorInfo input1TensorInfo({ 3, 3 }, armnn::GetDataType<T>());
2261 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2262 // Batch 0
2263 4.0f, 5.0f, 6.0f,
2264
2265 // Batch 1
2266 13.0f, 14.0f, 15.0f,
2267
2268 // Batch 0
2269 7.0f, 8.0f, 9.0f,
2270 }));
2271
2272 armnn::TensorInfo input2TensorInfo({ 1, 3 }, armnn::GetDataType<T>());
2273 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2274 // Batch 1
2275 16.0f, 17.0f, 18.0f,
2276 }));
2277
2278 armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
2279 LayerTestResult<T, 2> result(outputTensorInfo);
2280
2281 std::vector<T> output;
2282 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002283 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002284 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2285 { input0.data(), input1.data(), input2.data() },
2286 outputTensorInfo,
2287 output.data(),
2288 0);
2289
2290 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
2291 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2292 // Batch 0
2293 1.0f, 2.0f, 3.0f,
2294
2295 // Batch 1
2296 10.0f, 11.0f, 12.0f,
2297
2298 // Batch 2
2299 4.0f, 5.0f, 6.0f,
2300
2301 // Batch 3
2302 13.0f, 14.0f, 15.0f,
2303
2304 // Batch 4
2305 7.0f, 8.0f, 9.0f,
2306
2307 // Batch 5
2308 16.0f, 17.0f, 18.0f,
2309 }));
2310
2311 return result;
2312}
2313
2314LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2315{
2316 return Concatenation2dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2317}
2318
2319template <typename T>
2320LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2321 int32_t qOffset)
2322{
2323 armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
2324 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2325 // Batch 0
2326 1.0f, 2.0f, 3.0f,
2327
2328 // Batch 1
2329 10.0f, 11.0f, 12.0f,
2330 }));
2331
2332 armnn::TensorInfo input1TensorInfo({ 2, 5 }, armnn::GetDataType<T>());
2333 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2334 // Batch 0
2335 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
2336
2337 // Batch 1
2338 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
2339 }));
2340
2341 armnn::TensorInfo input2TensorInfo({ 2, 1 }, armnn::GetDataType<T>());
2342 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2343 // Batch 0
2344 9.0f,
2345
2346 // Batch 1
2347 18.0f
2348 }));
2349
2350 armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
2351 LayerTestResult<T, 2> result(outputTensorInfo);
2352
2353 std::vector<T> output;
2354 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002355 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002356 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2357 { input0.data(), input1.data(), input2.data() },
2358 outputTensorInfo,
2359 output.data(),
2360 1);
2361
2362 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
2363 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2364 // Batch 0
2365 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
2366
2367 // Batch 1
2368 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
2369 }));
2370
2371 return result;
2372}
2373
2374LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2375{
2376 return Concatenation2dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2377}
2378
2379template <typename T>
2380LayerTestResult<T, 3> Concatenation3dTestImpl(armnn::IWorkloadFactory& workloadFactory,
2381 const armnn::TensorInfo& outputTensorInfo,
2382 unsigned int dimension,
2383 float qScale,
2384 int32_t qOffset)
2385{
2386 armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2387
2388 auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2389 // Batch 0, Channel 0
2390 1.0f, 2.0f,
2391
2392 // Batch 0, Channel 1
2393 3.0f, 4.0f,
2394
2395 // Batch 0, Channel 2
2396 5.0f, 6.0f,
2397
2398 // Batch 1, Channel 0
2399 19.0f, 20.0f,
2400
2401 // Batch 1, Channel 1
2402 21.0f, 22.0f,
2403
2404 // Batch 1, Channel 2
2405 23.0f, 24.0f
2406 }));
2407
2408 auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2409 // Batch 0, Channel 0
2410 7.0f, 8.0f,
2411
2412 // Batch 0, Channel 1
2413 9.0f, 10.0f,
2414
2415 // Batch 0, Channel 2
2416 11.0f, 12.0f,
2417
2418 // Batch 1, Channel 0
2419 25.0f, 26.0f,
2420
2421 // Batch 1, Channel 1
2422 27.0f, 28.0f,
2423
2424 // Batch 1, Channel 2
2425 29.0f, 30.0f
2426 }));
2427
2428 auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2429 // Batch 0, Channel 0
2430 13.0f, 14.0f,
2431
2432 // Batch 0, Channel 1
2433 15.0f, 16.0f,
2434
2435 // Batch 0, Channel 2
2436 17.0f, 18.0f,
2437
2438 // Batch 1, Channel 0
2439 31.0f, 32.0f,
2440
2441 // Batch 1, Channel 1
2442 33.0f, 34.0f,
2443
2444 // Batch 1, Channel 2
2445 35.0f, 36.0f
2446 }));
2447
2448 LayerTestResult<T, 3> result(outputTensorInfo);
2449
2450 std::vector<T> output;
2451 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002452 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002453 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2454 { input0.data(), input1.data(), input2.data() },
2455 outputTensorInfo,
2456 output.data(),
2457 dimension);
2458
2459 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
2460 return result;
2461}
2462
2463template <typename T>
2464LayerTestResult<T, 3> Concatenation3dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2465 int32_t qOffset)
2466{
2467 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
2468
2469 LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 0,
2470 qScale, qOffset);
2471 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2472 // Batch 0, Channel 0
2473 1.0f, 2.0f,
2474
2475 // Batch 0, Channel 1
2476 3.0f, 4.0f,
2477
2478 // Batch 0, Channel 2
2479 5.0f, 6.0f,
2480
2481 // Batch 1, Channel 0
2482 19.0f, 20.0f,
2483
2484 // Batch 1, Channel 1
2485 21.0f, 22.0f,
2486
2487 // Batch 1, Channel 2
2488 23.0f, 24.0f,
2489
2490 // Batch 2, Channel 0
2491 7.0f, 8.0f,
2492
2493 // Batch 2, Channel 1
2494 9.0f, 10.0f,
2495
2496 // Batch 2, Channel 2
2497 11.0f, 12.0f,
2498
2499 // Batch 3, Channel 0
2500 25.0f, 26.0f,
2501
2502 // Batch 3, Channel 1
2503 27.0f, 28.0f,
2504
2505 // Batch 3, Channel 2
2506 29.0f, 30.0f,
2507
2508 // Batch 4, Channel 0
2509 13.0f, 14.0f,
2510
2511 // Batch 4, Channel 1
2512 15.0f, 16.0f,
2513
2514 // Batch 4, Channel 2
2515 17.0f, 18.0f,
2516
2517 // Batch 5, Channel 0
2518 31.0f, 32.0f,
2519
2520 // Batch 5, Channel 1
2521 33.0f, 34.0f,
2522
2523 // Batch 5, Channel 2
2524 35.0f, 36.0f
2525 }));
2526 return result;
2527}
2528
2529LayerTestResult<float, 3> Concatenation3dDim0Test(armnn::IWorkloadFactory& workloadFactory)
2530{
2531 return Concatenation3dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
2532}
2533
2534template <typename T>
2535LayerTestResult<T, 3> Concatenation3dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
2536 float qScale, int32_t qOffset)
2537{
2538 armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, armnn::GetDataType<T>());
2539
2540 LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
2541 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2542 // Batch 0, Channel 0
2543 1.0f, 2.0f,
2544
2545 // Batch 0, Channel 1
2546 3.0f, 4.0f,
2547
2548 // Batch 0, Channel 2
2549 5.0f, 6.0f,
2550
2551 // Batch 0, Channel 3
2552 7.0f, 8.0f,
2553
2554 // Batch 0, Channel 4
2555 9.0f, 10.0f,
2556
2557 // Batch 0, Channel 5
2558 11.0f, 12.0f,
2559
2560 // Batch 0, Channel 6
2561 13.0f, 14.0f,
2562
2563 // Batch 0, Channel 7
2564 15.0f, 16.0f,
2565
2566 // Batch 0, Channel 8
2567 17.0f, 18.0f,
2568
2569 // Batch 1, Channel 0
2570 19.0f, 20.0f,
2571
2572 // Batch 1, Channel 1
2573 21.0f, 22.0f,
2574
2575 // Batch 1, Channel 2
2576 23.0f, 24.0f,
2577
2578 // Batch 1, Channel 3
2579 25.0f, 26.0f,
2580
2581 // Batch 1, Channel 4
2582 27.0f, 28.0f,
2583
2584 // Batch 1, Channel 5
2585 29.0f, 30.0f,
2586
2587 // Batch 1, Channel 6
2588 31.0f, 32.0f,
2589
2590 // Batch 1, Channel 7
2591 33.0f, 34.0f,
2592
2593 // Batch 1, Channel 8
2594 35.0f, 36.0f
2595 }));
2596
2597 return result;
2598}
2599
2600LayerTestResult<float, 3> Concatenation3dDim1Test(armnn::IWorkloadFactory& workloadFactory)
2601{
2602 return Concatenation3dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
2603}
2604
2605template <typename T>
2606LayerTestResult<T, 3> Concatenation3dDim2TestImpl(armnn::IWorkloadFactory& workloadFactory,
2607 float qScale, int32_t qOffset)
2608{
2609 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
2610
2611 LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 2, qScale, qOffset);
2612 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2613 // Batch 0, Channel 0
2614 1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
2615
2616 // Batch 0, Channel 1
2617 3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
2618
2619 // Batch 0, Channel 2
2620 5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
2621
2622 // Batch 1, Channel 0
2623 19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
2624
2625 // Batch 1, Channel 1
2626 21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
2627
2628 // Batch 1, Channel 2
2629 23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
2630 }));
2631
2632 return result;
2633}
2634
2635LayerTestResult<float, 3> Concatenation3dDim2Test(armnn::IWorkloadFactory& workloadFactory)
2636{
2637 return Concatenation3dDim2TestImpl<float>(workloadFactory, 0.0f, 0);
2638}
2639
2640template <typename T>
2641LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2642 int32_t qOffset)
2643{
2644 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2645 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2646 // Batch 0, Channel 0
2647 1.0f, 2.0f,
2648
2649 // Batch 0, Channel 1
2650 3.0f, 4.0f,
2651
2652 // Batch 0, Channel 2
2653 5.0f, 6.0f,
2654
2655 // Batch 1, Channel 0
2656 19.0f, 20.0f,
2657
2658 // Batch 1, Channel 1
2659 21.0f, 22.0f,
2660
2661 // Batch 1, Channel 2
2662 23.0f, 24.0f
2663 }));
2664
2665 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, armnn::GetDataType<T>());
2666 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2667 // Batch 0, Channel 0
2668 7.0f, 8.0f,
2669
2670 // Batch 0, Channel 1
2671 9.0f, 10.0f,
2672
2673 // Batch 0, Channel 2
2674 11.0f, 12.0f,
2675 }));
2676
2677 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, armnn::GetDataType<T>());
2678 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2679 // Batch 0, Channel 0
2680 25.0f, 26.0f,
2681
2682 // Batch 0, Channel 1
2683 27.0f, 28.0f,
2684
2685 // Batch 0, Channel 2
2686 29.0f, 30.0f,
2687
2688 // Batch 1, Channel 0
2689 13.0f, 14.0f,
2690
2691 // Batch 1, Channel 1
2692 15.0f, 16.0f,
2693
2694 // Batch 1, Channel 2
2695 17.0f, 18.0f,
2696
2697 // Batch 2, Channel 0
2698 31.0f, 32.0f,
2699
2700 // Batch 2, Channel 1
2701 33.0f, 34.0f,
2702
2703 // Batch 2, Channel 2
2704 35.0f, 36.0f
2705 }));
2706
2707 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
2708 LayerTestResult<T, 3> result(outputTensorInfo);
2709
2710 std::vector<T> output;
2711 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002712 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002713 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2714 { input0.data(), input1.data(), input2.data() },
2715 outputTensorInfo,
2716 output.data(),
2717 0);
2718
2719 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
2720 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2721 // Batch 0, Channel 0
2722 1.0f, 2.0f,
2723
2724 // Batch 0, Channel 1
2725 3.0f, 4.0f,
2726
2727 // Batch 0, Channel 2
2728 5.0f, 6.0f,
2729
2730 // Batch 1, Channel 0
2731 19.0f, 20.0f,
2732
2733 // Batch 1, Channel 1
2734 21.0f, 22.0f,
2735
2736 // Batch 1, Channel 2
2737 23.0f, 24.0f,
2738
2739 // Batch 2, Channel 0
2740 7.0f, 8.0f,
2741
2742 // Batch 2, Channel 1
2743 9.0f, 10.0f,
2744
2745 // Batch 2, Channel 2
2746 11.0f, 12.0f,
2747
2748 // Batch 3, Channel 0
2749 25.0f, 26.0f,
2750
2751 // Batch 3, Channel 1
2752 27.0f, 28.0f,
2753
2754 // Batch 3, Channel 2
2755 29.0f, 30.0f,
2756
2757 // Batch 4, Channel 0
2758 13.0f, 14.0f,
2759
2760 // Batch 4, Channel 1
2761 15.0f, 16.0f,
2762
2763 // Batch 4, Channel 2
2764 17.0f, 18.0f,
2765
2766 // Batch 5, Channel 0
2767 31.0f, 32.0f,
2768
2769 // Batch 5, Channel 1
2770 33.0f, 34.0f,
2771
2772 // Batch 5, Channel 2
2773 35.0f, 36.0f
2774 }));
2775
2776 return result;
2777}
2778
2779LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2780{
2781 return Concatenation3dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2782}
2783
2784template <typename T>
2785LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2786 int32_t qOffset)
2787{
2788 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2789 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2790 // Batch 0, Channel 0
2791 1.0f, 2.0f,
2792
2793 // Batch 0, Channel 1
2794 3.0f, 4.0f,
2795
2796 // Batch 0, Channel 2
2797 5.0f, 6.0f,
2798
2799 // Batch 1, Channel 0
2800 19.0f, 20.0f,
2801
2802 // Batch 1, Channel 1
2803 21.0f, 22.0f,
2804
2805 // Batch 1, Channel 2
2806 23.0f, 24.0f
2807 }));
2808
2809 armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, armnn::GetDataType<T>());
2810 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2811 // Batch 0, Channel 0
2812 7.0f, 8.0f,
2813
2814 // Batch 0, Channel 1
2815 9.0f, 10.0f,
2816
2817 // Batch 0, Channel 2
2818 11.0f, 12.0f,
2819
2820 // Batch 0, Channel 3
2821 25.0f, 26.0f,
2822
2823 // Batch 1, Channel 0
2824 27.0f, 28.0f,
2825
2826 // Batch 1, Channel 1
2827 29.0f, 30.0f,
2828
2829 // Batch 1, Channel 2
2830 13.0f, 14.0f,
2831
2832 // Batch 1, Channel 3
2833 15.0f, 16.0f,
2834 }));
2835
2836 armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, armnn::GetDataType<T>());
2837 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2838 // Batch 0, Channel 0
2839 17.0f, 18.0f,
2840
2841 // Batch 1, Channel 0
2842 31.0f, 32.0f,
2843 }));
2844
2845 armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, armnn::GetDataType<T>());
2846 LayerTestResult<T, 3> result(outputTensorInfo);
2847
2848 std::vector<T> output;
2849 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002850 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002851 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2852 { input0.data(), input1.data(), input2.data() },
2853 outputTensorInfo,
2854 output.data(),
2855 1);
2856
2857 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
2858 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2859 // Batch 0, Channel 0
2860 1.0f, 2.0f,
2861
2862 // Batch 0, Channel 1
2863 3.0f, 4.0f,
2864
2865 // Batch 0, Channel 2
2866 5.0f, 6.0f,
2867
2868 // Batch 0, Channel 3
2869 7.0f, 8.0f,
2870
2871 // Batch 0, Channel 4
2872 9.0f, 10.0f,
2873
2874 // Batch 0, Channel 5
2875 11.0f, 12.0f,
2876
2877 // Batch 0, Channel 6
2878 25.0f, 26.0f,
2879
2880 // Batch 0, Channel 7
2881 17.0f, 18.0f,
2882
2883 // Batch 1, Channel 0
2884 19.0f, 20.0f,
2885
2886 // Batch 1, Channel 1
2887 21.0f, 22.0f,
2888
2889 // Batch 1, Channel 2
2890 23.0f, 24.0f,
2891
2892 // Batch 1, Channel 3
2893 27.0f, 28.0f,
2894
2895 // Batch 1, Channel 4
2896 29.0f, 30.0f,
2897
2898 // Batch 1, Channel 5
2899 13.0f, 14.0f,
2900
2901 // Batch 1, Channel 6
2902 15.0f, 16.0f,
2903
2904 // Batch 1, Channel 7
2905 31.0f, 32.0f,
2906 }));
2907
2908 return result;
2909}
2910
2911LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2912{
2913 return Concatenation3dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2914}
2915
2916template <typename T>
2917LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2918 int32_t qOffset)
2919{
2920 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2921 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2922 // Batch 0, Channel 0
2923 1.0f, 2.0f,
2924
2925 // Batch 0, Channel 1
2926 3.0f, 4.0f,
2927
2928 // Batch 0, Channel 2
2929 5.0f, 6.0f,
2930
2931 // Batch 1, Channel 0
2932 19.0f, 20.0f,
2933
2934 // Batch 1, Channel 1
2935 21.0f, 22.0f,
2936
2937 // Batch 1, Channel 2
2938 23.0f, 24.0f
2939 }));
2940
2941 armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, armnn::GetDataType<T>());
2942 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2943 // Batch 0, Channel 0
2944 7.0f,
2945
2946 // Batch 0, Channel 1
2947 9.0f,
2948
2949 // Batch 0, Channel 2
2950 11.0f,
2951
2952 // Batch 1, Channel 0
2953 25.0f,
2954
2955 // Batch 1, Channel 1
2956 27.0f,
2957
2958 // Batch 1, Channel 2
2959 29.0f
2960 }));
2961
2962 armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, armnn::GetDataType<T>());
2963 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2964 // Batch 0, Channel 0
2965 13.0f, 14.0f, 50.0f,
2966
2967 // Batch 0, Channel 1
2968 15.0f, 16.0f, 51.0f,
2969
2970 // Batch 0, Channel 2
2971 17.0f, 18.0f, 52.0f,
2972
2973 // Batch 1, Channel 0
2974 31.0f, 32.0f, 53.0f,
2975
2976 // Batch 1, Channel 1
2977 33.0f, 34.0f, 54.0f,
2978
2979 // Batch 1, Channel 2
2980 35.0f, 36.0f, 55.0f,
2981 }));
2982
2983 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
2984 LayerTestResult<T, 3> result(outputTensorInfo);
2985
2986 std::vector<T> output;
2987 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002988 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002989 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2990 { input0.data(), input1.data(), input2.data() },
2991 outputTensorInfo,
2992 output.data(),
2993 2);
2994
2995 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
2996 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2997 // Batch 0, Channel 0
2998 1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
2999
3000 // Batch 0, Channel 1
3001 3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
3002
3003 // Batch 0, Channel 2
3004 5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
3005
3006 // Batch 1, Channel 0
3007 19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
3008
3009 // Batch 1, Channel 1
3010 21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
3011
3012 // Batch 1, Channel 2
3013 23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
3014 }));
3015
3016 return result;
3017}
3018
3019LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
3020{
3021 return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
3022}
3023
James Conroy074f3712018-10-03 09:32:03 +01003024LayerTestResult<float, 4> ResizeBilinearNopTestImpl(armnn::IWorkloadFactory& workloadFactory,
3025 const armnn::TensorShape& inputOutputTensorShape,
3026 armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00003027{
James Conroy074f3712018-10-03 09:32:03 +01003028 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
3029 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00003030
3031 auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
3032 1.0f, 2.0f, 3.0f, 4.0f,
3033 2.0f, 3.0f, 4.0f, 5.0f,
3034 3.0f, 4.0f, 5.0f, 6.0f,
3035 4.0f, 5.0f, 6.0f, 7.0f
3036 }));
3037
3038 LayerTestResult<float, 4> result(outputTensorInfo);
3039 result.outputExpected = input;
3040
3041 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3042 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3043
3044 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003045 descriptor.m_Parameters.m_DataLayout = dataLayout;
3046 armnn::WorkloadInfo info;
3047 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3048 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3049
3050 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3051
3052 inputHandle->Allocate();
3053 outputHandle->Allocate();
3054 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3055
3056 workloadFactory.Finalize();
3057 workload->Execute();
3058
3059 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3060 return result;
3061}
3062
3063LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory)
3064{
3065 // BatchSize = 1, Channels = 1, Height = 4, Width = 4
3066 const armnn::TensorShape inputOutputShape{ 1, 1, 4, 4 };
3067
3068 return ResizeBilinearNopTestImpl(workloadFactory, inputOutputShape, armnn::DataLayout::NCHW);
3069}
3070
3071LayerTestResult<float, 4> ResizeBilinearNopNhwcTest(armnn::IWorkloadFactory& workloadFactory)
3072{
3073 // BatchSize = 1, Height = 4, Width = 4, Channels = 1
3074 const armnn::TensorShape inputOutputShape{ 1, 4, 4, 1 };
3075
3076 return ResizeBilinearNopTestImpl(workloadFactory, inputOutputShape, armnn::DataLayout::NHWC);
3077}
3078
3079LayerTestResult<float, 4> SimpleResizeBilinearTestImpl(armnn::IWorkloadFactory& workloadFactory,
3080 const armnn::TensorShape& inputTensorShape,
3081 const armnn::TensorShape& outputTensorShape,
3082 armnn::DataLayout dataLayout)
3083{
3084 const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
3085 const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
3086
3087 auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
3088 1.0f, 255.0f,
3089 200.0f, 250.0f
3090 }));
3091
3092 // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
3093 // then figures out the interpolants and weights. Note this is different to projecting the centre of the
3094 // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
3095 // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
3096 // the centre).
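    // As a worked sketch of that projection for this 2x2 -> 1x1 case: the scale factor is
    // inputSize / outputSize = 2 in each dimension, so output texel (0, 0) projects to
    // input coordinate (0 * 2, 0 * 2) = (0, 0), whose value is 1.0f.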
3097 LayerTestResult<float, 4> result(outputTensorInfo);
3098 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
3099 1.0f
3100 }));
3101
3102 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3103 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3104
3105 armnn::ResizeBilinearQueueDescriptor descriptor;
3106 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003107 armnn::WorkloadInfo info;
3108 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3109 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3110
3111 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3112
3113 inputHandle->Allocate();
3114 outputHandle->Allocate();
3115 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3116
surmeh013537c2c2018-05-18 16:31:43 +01003117 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00003118 workload->Execute();
3119
3120 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3121 return result;
3122}
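// A minimal sketch (not part of any workload; the name is illustrative only) of the coordinate
// mapping described in SimpleResizeBilinearTestImpl above, assuming the backend computes the
// scale as inputSize / outputSize and projects the top-left corner of each output texel:
namespace
{

inline float ResizeBilinearSourceCoordinate(unsigned int outputCoordinate,
                                            unsigned int inputSize,
                                            unsigned int outputSize)
{
    // For the 2x2 -> 1x1 case above: scale = 2.0f and outputCoordinate = 0, so the source
    // coordinate is 0.0f - the single output texel samples the input at (0,0), with no averaging.
    const float scale = static_cast<float>(inputSize) / static_cast<float>(outputSize);
    return static_cast<float>(outputCoordinate) * scale;
}

} // anonymous namespace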
3123
3124LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory)
3125{
James Conroy074f3712018-10-03 09:32:03 +01003126 // inputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 2
3127 const armnn::TensorShape inputShape{ 1, 1, 2, 2 };
telsoa014fcda012018-03-09 14:13:49 +00003128
James Conroy074f3712018-10-03 09:32:03 +01003129 // outputShape: BatchSize = 1, Channels = 1, Height = 1, Width = 1
3130 const armnn::TensorShape outputShape{ 1, 1, 1, 1 };
telsoa014fcda012018-03-09 14:13:49 +00003131
James Conroy074f3712018-10-03 09:32:03 +01003132 return SimpleResizeBilinearTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
3133}
3134
3135LayerTestResult<float, 4> SimpleResizeBilinearNhwcTest(armnn::IWorkloadFactory& workloadFactory)
3136{
3137 // inputShape: BatchSize = 1, Height = 2, Width = 2, Channels = 1
3138 const armnn::TensorShape inputShape{ 1, 2, 2, 1 };
3139
3140 // outputShape: BatchSize = 1, Height = 1, Width = 1, Channels = 1
3141 const armnn::TensorShape outputShape{ 1, 1, 1, 1 };
3142
3143 return SimpleResizeBilinearTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
3144}
3145
3146LayerTestResult<float, 4> ResizeBilinearSqMinTestImpl(armnn::IWorkloadFactory& workloadFactory,
3147 const armnn::TensorShape& inputTensorShape,
3148 const armnn::TensorShape& outputTensorShape,
3149 armnn::DataLayout dataLayout)
3150{
3151 const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
3152 const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00003153
3154 auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
James Conroy074f3712018-10-03 09:32:03 +01003155 1.0f, 2.0f, 3.0f, 4.0f,
3156 2.0f, 3.0f, 4.0f, 5.0f,
3157 3.0f, 4.0f, 5.0f, 6.0f,
3158 4.0f, 5.0f, 6.0f, 7.0f
telsoa014fcda012018-03-09 14:13:49 +00003159 }));
3160
telsoa014fcda012018-03-09 14:13:49 +00003161 LayerTestResult<float, 4> result(outputTensorInfo);
3162 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
James Conroy074f3712018-10-03 09:32:03 +01003163 1.0f, 3.0f,
3164 3.0f, 5.0f
telsoa014fcda012018-03-09 14:13:49 +00003165 }));
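    // With a scale of exactly 2 in both dimensions and top-left projection, each output element
    // lands directly on an input sample, so no interpolation occurs: e.g. output(1,1) reads
    // input(2,2) = 5.0f.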
3166
3167 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3168 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3169
3170 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003171 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003172 armnn::WorkloadInfo info;
3173 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3174 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3175
3176 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3177
3178 inputHandle->Allocate();
3179 outputHandle->Allocate();
3180 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3181
surmeh013537c2c2018-05-18 16:31:43 +01003182 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00003183 workload->Execute();
3184
3185 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3186 return result;
3187}
3188
3189LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory)
3190{
James Conroy074f3712018-10-03 09:32:03 +01003191 // inputShape: BatchSize = 1, Channels = 1, Height = 4, Width = 4
3192 const armnn::TensorShape inputShape{ 1, 1, 4, 4 };
telsoa014fcda012018-03-09 14:13:49 +00003193
James Conroy074f3712018-10-03 09:32:03 +01003194 // outputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 2
3195 const armnn::TensorShape outputShape{ 1, 1, 2, 2 };
telsoa014fcda012018-03-09 14:13:49 +00003196
James Conroy074f3712018-10-03 09:32:03 +01003197 return ResizeBilinearSqMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
3198}
3199
3200LayerTestResult<float, 4> ResizeBilinearSqMinNhwcTest(armnn::IWorkloadFactory& workloadFactory)
3201{
3202 // inputShape: BatchSize = 1, Height = 4, Width = 4, Channels = 1
3203 const armnn::TensorShape inputShape{ 1, 4, 4, 1 };
3204
3205 // outputShape: BatchSize = 1, Height = 2, Width = 2, Channels = 1
3206 const armnn::TensorShape outputShape{ 1, 2, 2, 1 };
3207
3208 return ResizeBilinearSqMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
3209}
3210
3211LayerTestResult<float, 4> ResizeBilinearMinTestImpl(armnn::IWorkloadFactory& workloadFactory,
3212 const armnn::TensorShape& inputTensorShape,
3213 const armnn::TensorShape& outputTensorShape,
3214 armnn::DataLayout dataLayout)
3215{
3216 const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
3217 const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00003218
3219 auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
James Conroy074f3712018-10-03 09:32:03 +01003220 1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
3221 13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
3222 144.0f, 233.0f, 377.0f, 610.0f, 987.0f
telsoa014fcda012018-03-09 14:13:49 +00003223 }));
3224
3225 LayerTestResult<float, 4> result(outputTensorInfo);
3226 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
James Conroy074f3712018-10-03 09:32:03 +01003227 1.0f, 2.6666f, 6.0f,
3228 78.5f, 179.3333f, 401.0f
telsoa014fcda012018-03-09 14:13:49 +00003229 }));
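    // A worked check of the two least obvious values above, assuming top-left projection with
    // scale factors of 3/2 (height) and 5/3 (width):
    //   output(0,1): source x = 1 * 5/3 ~ 1.6667 => 2.0f + 0.6667f * (3.0f - 2.0f)  = 2.6666f
    //   output(1,0): source y = 1 * 3/2 = 1.5    => 13.0f + 0.5f * (144.0f - 13.0f) = 78.5f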
3230
3231 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3232 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3233
3234 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003235 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003236 armnn::WorkloadInfo info;
3237 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3238 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3239
3240 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3241
3242 inputHandle->Allocate();
3243 outputHandle->Allocate();
3244 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3245
surmeh013537c2c2018-05-18 16:31:43 +01003246 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00003247 workload->Execute();
3248
3249 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3250 return result;
3251}
3252
3253LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory)
3254{
James Conroy074f3712018-10-03 09:32:03 +01003255 // inputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 5
3256 const armnn::TensorShape inputShape{ 1, 1, 3, 5 };
telsoa014fcda012018-03-09 14:13:49 +00003257
James Conroy074f3712018-10-03 09:32:03 +01003258 // outputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 3
3259 const armnn::TensorShape outputShape{ 1, 1, 2, 3 };
telsoa014fcda012018-03-09 14:13:49 +00003260
James Conroy074f3712018-10-03 09:32:03 +01003261 return ResizeBilinearMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
3262}
3263
3264LayerTestResult<float, 4> ResizeBilinearMinNhwcTest(armnn::IWorkloadFactory& workloadFactory)
3265{
3266 // inputShape: BatchSize = 1, Height = 3, Width = 5, Channels = 1
3267 const armnn::TensorShape inputShape{ 1, 3, 5, 1 };
3268
3269 // outputShape: BatchSize = 1, Height = 2, Width = 3, Channels = 1
3270 const armnn::TensorShape outputShape{ 1, 2, 3, 1 };
3271
3272 return ResizeBilinearMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
3273}
3274
3275LayerTestResult<float, 4> ResizeBilinearMagTestImpl(armnn::IWorkloadFactory& workloadFactory,
3276 const armnn::TensorShape& inputTensorShape,
3277 const armnn::TensorShape& outputTensorShape,
3278 armnn::DataLayout dataLayout)
3279{
3280 const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
3281 const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00003282
3283 auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
James Conroy074f3712018-10-03 09:32:03 +01003284 1.0f, 2.0f,
3285 13.0f, 21.0f,
3286 144.0f, 233.0f
telsoa014fcda012018-03-09 14:13:49 +00003287 }));
3288
3289 LayerTestResult<float, 4> result(outputTensorInfo);
3290 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
James Conroy074f3712018-10-03 09:32:03 +01003291 1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
3292 13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
3293 144.0f, 179.6f, 215.2f, 233.0f, 233.0f
telsoa014fcda012018-03-09 14:13:49 +00003294 }));
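    // A worked check for the first row, assuming top-left projection with a width scale of 2/5:
    //   output(0,1): source x = 0.4 => 1.0f + 0.4f * (2.0f - 1.0f) = 1.4f
    //   output(0,3): source x = 1.2 falls in the last column pair, whose right neighbour clamps
    //                to the image edge, so the result is exactly the right-edge value 2.0f.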
3295
3296 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3297 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3298
3299 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003300 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003301 armnn::WorkloadInfo info;
3302 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3303 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3304
3305 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3306
3307 inputHandle->Allocate();
3308 outputHandle->Allocate();
3309 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3310
surmeh013537c2c2018-05-18 16:31:43 +01003311 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00003312 workload->Execute();
3313
3314 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3315 return result;
3316}
3317
3318LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory)
3319{
James Conroy074f3712018-10-03 09:32:03 +01003320 // inputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 2
3321 const armnn::TensorShape inputShape{ 1, 1, 3, 2 };
telsoa014fcda012018-03-09 14:13:49 +00003322
James Conroy074f3712018-10-03 09:32:03 +01003323 // outputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 5
3324 const armnn::TensorShape outputShape{ 1, 1, 3, 5 };
telsoa014fcda012018-03-09 14:13:49 +00003325
James Conroy074f3712018-10-03 09:32:03 +01003326 return ResizeBilinearMagTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
3327}
telsoa014fcda012018-03-09 14:13:49 +00003328
James Conroy074f3712018-10-03 09:32:03 +01003329LayerTestResult<float, 4> ResizeBilinearMagNhwcTest(armnn::IWorkloadFactory& workloadFactory)
3330{
3331 // inputShape: BatchSize = 1, Height = 3, Width = 2, Channels = 1
3332 const armnn::TensorShape inputShape{ 1, 3, 2, 1 };
telsoa014fcda012018-03-09 14:13:49 +00003333
James Conroy074f3712018-10-03 09:32:03 +01003334 // outputShape: BatchSize = 1, Height = 3, Width = 5, Channels = 1
3335 const armnn::TensorShape outputShape{ 1, 3, 5, 1 };
telsoa014fcda012018-03-09 14:13:49 +00003336
James Conroy074f3712018-10-03 09:32:03 +01003337 return ResizeBilinearMagTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00003338}
3339
3340LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workloadFactory)
3341{
3342 constexpr unsigned int width = 2;
3343 constexpr unsigned int height = 3;
3344
3345 const armnn::TensorInfo tensorInfo({ height, width },
3346 armnn::DataType::Float32);
3347 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
3348 -10.0f, -5.0f,
3349 0.0f, 5.0f,
3350 10.0f, 10.0f
3351 }));
3352
3353 LayerTestResult<float, 2> ret(tensorInfo);
3354
3355 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
3356
3357 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
3358
3359 armnn::FakeQuantizationQueueDescriptor data;
3360 armnn::WorkloadInfo info;
3361
3362 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
3363 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
3364 float min = -10.f;
3365 float max = 10.f;
3366
3367 data.m_Parameters.m_Min = min;
3368 data.m_Parameters.m_Max = max;
3369
3370 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
3371 armnn::FakeQuantizationQueueDescriptor refData = data;
3372 armnn::WorkloadInfo refInfo = info;
3373 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
3374
3375 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
3376
3377 inputHandle->Allocate();
3378 outputHandle->Allocate();
3379
3380 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
3381
surmeh013537c2c2018-05-18 16:31:43 +01003382 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00003383 workload->Execute();
3384
3385 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
3386
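    // The expected values below follow the affine mapping q = (x - min) / (max - min) * 255.0f,
    // clamped to [0, 255]; with min = -10 and max = 10 this maps 0.0f to 127.5 and -5.0f to 63.75.
    // How the half-way and fractional cases land (128 and 63 here) depends on the rounding used by
    // the backend's quantizer, so this describes the data rather than a normative formula.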
3387 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
3388 0.0f, 63.0f,
3389 128.0f, 191.0f,
3390 255.0f, 255.0f
3391 }));
3392 return ret;
3393}
3394
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003395namespace
3396{
3397
3398LayerTestResult<float, 4> L2NormalizationTestImpl(armnn::IWorkloadFactory& workloadFactory,
3399 const armnn::TensorShape& inputOutputTensorShape,
3400 const std::vector<float>& inputValues,
3401 const std::vector<float>& expectedOutputValues,
3402 armnn::DataLayout dataLayout)
3403{
3404 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
3405 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
3406
3407 auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputValues));
3408
3409 LayerTestResult<float, 4> result(outputTensorInfo);
3410 result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(expectedOutputValues));
3411
3412 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3413 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3414
3415 armnn::L2NormalizationQueueDescriptor descriptor;
3416 descriptor.m_Parameters.m_DataLayout = dataLayout;
3417 armnn::WorkloadInfo info;
3418
3419 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3420 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3421
3422 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
3423
3424 inputHandle->Allocate();
3425 outputHandle->Allocate();
3426
3427 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
3428
3429 workloadFactory.Finalize();
3430 workload->Execute();
3431
3432 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3433
3434 return result;
3435}
3436
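// The L2 normalization tests below normalize across the channel dimension, i.e.
// output[n][c][h][w] = input[n][c][h][w] / sqrt(sum over c' of input[n][c'][h][w]^2);
// the NHWC variants express the same data in the alternative layout. CalcInvL2Norm computes
// the 1 / sqrt(...) factor for one such channel slice.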
3437float CalcInvL2Norm(std::initializer_list<float> elements)
3438{
3439 const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
3440 [](float acc, float element) { return acc + element * element; });
3441 return 1.0f / sqrtf(reduction);
3442}
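// For example, CalcInvL2Norm({ 3.0f, 4.0f }) returns 1.0f / sqrtf(9.0f + 16.0f) = 0.2f.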
3443
3444} // anonymous namespace
3445
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003446LayerTestResult<float, 2> Pad2dTest(armnn::IWorkloadFactory& workloadFactory)
3447{
3448 const armnn::TensorShape inputShape{ 3, 3 };
3449 const armnn::TensorShape outputShape{ 7, 7 };
3450
3451 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
3452 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
3453
3454
3455 std::vector<float> inputValues
3456 {
3457
3458 // Height (3) x Width (3)
3459 4.0f, 8.0f, 6.0f,
3460 7.0f, 4.0f, 4.0f,
3461 3.0f, 2.0f, 4.0f
3462
3463 };
3464
3465 std::vector<float> expectedOutputValues
3466 {
3467
3468 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
3469 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
3470 0.0f, 0.0f, 4.0f, 8.0f, 6.0f, 0.0f, 0.0f,
3471 0.0f, 0.0f, 7.0f, 4.0f, 4.0f, 0.0f, 0.0f,
3472 0.0f, 0.0f, 3.0f, 2.0f, 4.0f, 0.0f, 0.0f,
3473 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
3474 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
3475
3476 };
3477
3478 auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, std::vector<float>(inputValues));
3479
3480 LayerTestResult<float, 2> result(outputTensorInfo);
3481 result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(expectedOutputValues));
3482
3483 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3484 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3485
3486 armnn::PadQueueDescriptor descriptor;
3487
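    // Each (before, after) pair pads one dimension: padding both dimensions of the 3x3 input by
    // two elements on either side yields the 7x7 output declared above.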
3488 std::vector<std::pair<unsigned int, unsigned int>> padList;
3489 padList.emplace_back(2, 2);
3490 padList.emplace_back(2, 2);
3491
3492 descriptor.m_Parameters.m_PadList = padList;
3493 armnn::WorkloadInfo info;
3494
3495 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3496 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3497
3498 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
3499
3500 inputHandle->Allocate();
3501 outputHandle->Allocate();
3502
3503 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
3504
3505 workloadFactory.Finalize();
3506 workload->Execute();
3507
3508 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
3509
3510 return result;
3511}
3512
3513LayerTestResult<float, 3> Pad3dTest(armnn::IWorkloadFactory& workloadFactory)
3514{
3515 const armnn::TensorShape inputShape{ 2, 2, 2 };
3516 const armnn::TensorShape outputShape{ 3, 5, 6 };
3517
3518 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
3519 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
3520
3521
3522 std::vector<float> inputValues
3523 {
3524
3525 // Channel 0, Height (2) x Width (2)
3526 0.0f, 4.0f,
3527 2.0f, 5.0f,
3528
3529 // Channel 1, Height (2) x Width (2)
3530 6.0f, 1.0f,
3531 5.0f, 2.0f
3532 };
3533
3534 std::vector<float> expectedOutputValues
3535 {
3536
3537 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
3538 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
3539 0.0f, 0.0f, 0.0f, 4.0f, 0.0f, 0.0f,
3540 0.0f, 0.0f, 2.0f, 5.0f, 0.0f, 0.0f,
3541 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
3542
3543
3544 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
3545 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
3546 0.0f, 0.0f, 6.0f, 1.0f, 0.0f, 0.0f,
3547 0.0f, 0.0f, 5.0f, 2.0f, 0.0f, 0.0f,
3548 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
3549
3550
3551 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
3552 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
3553 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
3554 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
3555 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
3556
3557 };
3558
3559 auto inputTensor = MakeTensor<float, 3>(inputTensorInfo, std::vector<float>(inputValues));
3560
3561 LayerTestResult<float, 3> result(outputTensorInfo);
3562 result.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(expectedOutputValues));
3563
3564 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3565 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3566
3567 armnn::PadQueueDescriptor descriptor;
3568
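    // (before, after) padding of (0,1), (2,1) and (2,2) grows the 2x2x2 input to the 3x5x6 output.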
3569 std::vector<std::pair<unsigned int, unsigned int>> padList;
3570 padList.emplace_back(0, 1);
3571 padList.emplace_back(2, 1);
3572 padList.emplace_back(2, 2);
3573
3574 descriptor.m_Parameters.m_PadList = padList;
3575 armnn::WorkloadInfo info;
3576
3577 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3578 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3579
3580 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
3581
3582 inputHandle->Allocate();
3583 outputHandle->Allocate();
3584
3585 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
3586
3587 workloadFactory.Finalize();
3588 workload->Execute();
3589
3590 CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
3591
3592 return result;
3593}
3594
3595LayerTestResult<float, 4> Pad4dTest(armnn::IWorkloadFactory& workloadFactory)
3596{
3597 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
3598 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
3599
3600 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
3601 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
3602
3603 std::vector<float> inputValues
3604 {
3605 // Batch 0, Channel 0, Height (3) x Width (2)
3606 0.0f, 1.0f,
3607 2.0f, 3.0f,
3608 4.0f, 5.0f,
3609
3610 // Batch 0, Channel 1, Height (3) x Width (2)
3611 6.0f, 7.0f,
3612 8.0f, 9.0f,
3613 10.0f, 11.0f,
3614
3615 // Batch 1, Channel 0, Height (3) x Width (2)
3616 12.0f, 13.0f,
3617 14.0f, 15.0f,
3618 16.0f, 17.0f,
3619
3620 // Batch 1, Channel 1, Height (3) x Width (2)
3621 18.0f, 19.0f,
3622 20.0f, 21.0f,
3623 22.0f, 23.0f
3624
3625 };
3626
3627 std::vector<float> expectedOutputValues
3628 {
3629 0.0f, 0.0f, 0.0f, 0.0f,
3630 0.0f, 0.0f, 0.0f, 0.0f,
3631 0.0f, 0.0f, 0.0f, 0.0f,
3632 0.0f, 0.0f, 0.0f, 0.0f,
3633 0.0f, 0.0f, 0.0f, 0.0f,
3634 0.0f, 0.0f, 0.0f, 0.0f,
3635 0.0f, 0.0f, 0.0f, 0.0f,
3636
3637
3638 0.0f, 0.0f, 0.0f, 0.0f,
3639 0.0f, 0.0f, 0.0f, 0.0f,
3640 0.0f, 0.0f, 0.0f, 0.0f,
3641 0.0f, 0.0f, 0.0f, 0.0f,
3642 0.0f, 0.0f, 0.0f, 0.0f,
3643 0.0f, 0.0f, 0.0f, 0.0f,
3644 0.0f, 0.0f, 0.0f, 0.0f,
3645
3646
3647 0.0f, 0.0f, 0.0f, 0.0f,
3648 0.0f, 0.0f, 0.0f, 0.0f,
3649 0.0f, 0.0f, 0.0f, 0.0f,
3650 0.0f, 0.0f, 0.0f, 0.0f,
3651 0.0f, 0.0f, 0.0f, 0.0f,
3652 0.0f, 0.0f, 0.0f, 0.0f,
3653 0.0f, 0.0f, 0.0f, 0.0f,
3654
3655
3656 0.0f, 0.0f, 0.0f, 0.0f,
3657 0.0f, 0.0f, 0.0f, 0.0f,
3658 0.0f, 0.0f, 0.0f, 0.0f,
3659 0.0f, 0.0f, 0.0f, 0.0f,
3660 0.0f, 0.0f, 0.0f, 0.0f,
3661 0.0f, 0.0f, 0.0f, 0.0f,
3662 0.0f, 0.0f, 0.0f, 0.0f,
3663
3664
3665 0.0f, 0.0f, 0.0f, 0.0f,
3666 0.0f, 0.0f, 0.0f, 0.0f,
3667 0.0f, 0.0f, 0.0f, 0.0f,
3668 0.0f, 0.0f, 0.0f, 0.0f,
3669 0.0f, 0.0f, 0.0f, 0.0f,
3670 0.0f, 0.0f, 0.0f, 0.0f,
3671 0.0f, 0.0f, 0.0f, 0.0f,
3672
3673
3674 0.0f, 0.0f, 0.0f, 0.0f,
3675 0.0f, 0.0f, 0.0f, 0.0f,
3676 0.0f, 0.0f, 0.0f, 0.0f,
3677 0.0f, 0.0f, 0.0f, 0.0f,
3678 0.0f, 0.0f, 0.0f, 0.0f,
3679 0.0f, 0.0f, 0.0f, 0.0f,
3680 0.0f, 0.0f, 0.0f, 0.0f,
3681
3682
3683 0.0f, 0.0f, 0.0f, 0.0f,
3684 0.0f, 0.0f, 0.0f, 0.0f,
3685 0.0f, 0.0f, 0.0f, 0.0f,
3686 0.0f, 0.0f, 0.0f, 0.0f,
3687 0.0f, 0.0f, 0.0f, 0.0f,
3688 0.0f, 0.0f, 0.0f, 0.0f,
3689 0.0f, 0.0f, 0.0f, 0.0f,
3690
3691
3692 0.0f, 0.0f, 0.0f, 0.0f,
3693 0.0f, 0.0f, 0.0f, 0.0f,
3694 0.0f, 0.0f, 0.0f, 0.0f,
3695 0.0f, 0.0f, 1.0f, 0.0f,
3696 0.0f, 2.0f, 3.0f, 0.0f,
3697 0.0f, 4.0f, 5.0f, 0.0f,
3698 0.0f, 0.0f, 0.0f, 0.0f,
3699
3700
3701 0.0f, 0.0f, 0.0f, 0.0f,
3702 0.0f, 0.0f, 0.0f, 0.0f,
3703 0.0f, 0.0f, 0.0f, 0.0f,
3704 0.0f, 6.0f, 7.0f, 0.0f,
3705 0.0f, 8.0f, 9.0f, 0.0f,
3706 0.0f, 10.0f, 11.0f, 0.0f,
3707 0.0f, 0.0f, 0.0f, 0.0f,
3708
3709
3710 0.0f, 0.0f, 0.0f, 0.0f,
3711 0.0f, 0.0f, 0.0f, 0.0f,
3712 0.0f, 0.0f, 0.0f, 0.0f,
3713 0.0f, 0.0f, 0.0f, 0.0f,
3714 0.0f, 0.0f, 0.0f, 0.0f,
3715 0.0f, 0.0f, 0.0f, 0.0f,
3716 0.0f, 0.0f, 0.0f, 0.0f,
3717
3718
3719 0.0f, 0.0f, 0.0f, 0.0f,
3720 0.0f, 0.0f, 0.0f, 0.0f,
3721 0.0f, 0.0f, 0.0f, 0.0f,
3722 0.0f, 0.0f, 0.0f, 0.0f,
3723 0.0f, 0.0f, 0.0f, 0.0f,
3724 0.0f, 0.0f, 0.0f, 0.0f,
3725 0.0f, 0.0f, 0.0f, 0.0f,
3726
3727
3728 0.0f, 0.0f, 0.0f, 0.0f,
3729 0.0f, 0.0f, 0.0f, 0.0f,
3730 0.0f, 0.0f, 0.0f, 0.0f,
3731 0.0f, 0.0f, 0.0f, 0.0f,
3732 0.0f, 0.0f, 0.0f, 0.0f,
3733 0.0f, 0.0f, 0.0f, 0.0f,
3734 0.0f, 0.0f, 0.0f, 0.0f,
3735
3736
3737 0.0f, 0.0f, 0.0f, 0.0f,
3738 0.0f, 0.0f, 0.0f, 0.0f,
3739 0.0f, 0.0f, 0.0f, 0.0f,
3740 0.0f, 12.0f, 13.0f, 0.0f,
3741 0.0f, 14.0f, 15.0f, 0.0f,
3742 0.0f, 16.0f, 17.0f, 0.0f,
3743 0.0f, 0.0f, 0.0f, 0.0f,
3744
3745
3746 0.0f, 0.0f, 0.0f, 0.0f,
3747 0.0f, 0.0f, 0.0f, 0.0f,
3748 0.0f, 0.0f, 0.0f, 0.0f,
3749 0.0f, 18.0f, 19.0f, 0.0f,
3750 0.0f, 20.0f, 21.0f, 0.0f,
3751 0.0f, 22.0f, 23.0f, 0.0f,
3752 0.0f, 0.0f, 0.0f, 0.0f,
3753
3754
3755 0.0f, 0.0f, 0.0f, 0.0f,
3756 0.0f, 0.0f, 0.0f, 0.0f,
3757 0.0f, 0.0f, 0.0f, 0.0f,
3758 0.0f, 0.0f, 0.0f, 0.0f,
3759 0.0f, 0.0f, 0.0f, 0.0f,
3760 0.0f, 0.0f, 0.0f, 0.0f,
3761 0.0f, 0.0f, 0.0f, 0.0f,
3762
3763
3764 0.0f, 0.0f, 0.0f, 0.0f,
3765 0.0f, 0.0f, 0.0f, 0.0f,
3766 0.0f, 0.0f, 0.0f, 0.0f,
3767 0.0f, 0.0f, 0.0f, 0.0f,
3768 0.0f, 0.0f, 0.0f, 0.0f,
3769 0.0f, 0.0f, 0.0f, 0.0f,
3770 0.0f, 0.0f, 0.0f, 0.0f,
3771
3772
3773 0.0f, 0.0f, 0.0f, 0.0f,
3774 0.0f, 0.0f, 0.0f, 0.0f,
3775 0.0f, 0.0f, 0.0f, 0.0f,
3776 0.0f, 0.0f, 0.0f, 0.0f,
3777 0.0f, 0.0f, 0.0f, 0.0f,
3778 0.0f, 0.0f, 0.0f, 0.0f,
3779 0.0f, 0.0f, 0.0f, 0.0f,
3780
3781
3782 0.0f, 0.0f, 0.0f, 0.0f,
3783 0.0f, 0.0f, 0.0f, 0.0f,
3784 0.0f, 0.0f, 0.0f, 0.0f,
3785 0.0f, 0.0f, 0.0f, 0.0f,
3786 0.0f, 0.0f, 0.0f, 0.0f,
3787 0.0f, 0.0f, 0.0f, 0.0f,
3788 0.0f, 0.0f, 0.0f, 0.0f,
3789
3790
3791 0.0f, 0.0f, 0.0f, 0.0f,
3792 0.0f, 0.0f, 0.0f, 0.0f,
3793 0.0f, 0.0f, 0.0f, 0.0f,
3794 0.0f, 0.0f, 0.0f, 0.0f,
3795 0.0f, 0.0f, 0.0f, 0.0f,
3796 0.0f, 0.0f, 0.0f, 0.0f,
3797 0.0f, 0.0f, 0.0f, 0.0f,
3798
3799
3800 0.0f, 0.0f, 0.0f, 0.0f,
3801 0.0f, 0.0f, 0.0f, 0.0f,
3802 0.0f, 0.0f, 0.0f, 0.0f,
3803 0.0f, 0.0f, 0.0f, 0.0f,
3804 0.0f, 0.0f, 0.0f, 0.0f,
3805 0.0f, 0.0f, 0.0f, 0.0f,
3806 0.0f, 0.0f, 0.0f, 0.0f
3807
3808 };
3809
3810 auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputValues));
3811
3812 LayerTestResult<float, 4> result(outputTensorInfo);
3813 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(expectedOutputValues));
3814
3815 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3816 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3817
3818 armnn::PadQueueDescriptor descriptor;
3819
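    // (before, after) padding of (1,1), (2,1), (3,1) and (1,1) grows the 2x2x3x2 input to the
    // 4x5x7x4 output.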
3820 std::vector<std::pair<unsigned int, unsigned int>> padList;
3821 padList.emplace_back(1, 1);
3822 padList.emplace_back(2, 1);
3823 padList.emplace_back(3, 1);
3824 padList.emplace_back(1, 1);
3825
3826 descriptor.m_Parameters.m_PadList = padList;
3827 armnn::WorkloadInfo info;
3828
3829 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3830 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3831
3832 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
3833
3834 inputHandle->Allocate();
3835 outputHandle->Allocate();
3836
3837 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
3838
3839 workloadFactory.Finalize();
3840
3841 workload->Execute();
3842
3843 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3844
3845 return result;
3846}
3847
telsoa014fcda012018-03-09 14:13:49 +00003848LayerTestResult<float, 4> L2Normalization1dTest(armnn::IWorkloadFactory& workloadFactory)
3849{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003850 // Width: 1
3851 // Height: 1
3852 // Channels: 10
3853 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00003854
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003855 const armnn::TensorShape inputOutputShape{ 1, 10, 1, 1 };
3856 std::vector<float> inputValues
3857 {
3858 // Batch 0, Channel 0, Height (1) x Width (1)
3859 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00003860
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003861 // Batch 0, Channel 1, Height (1) x Width (1)
3862 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00003863
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003864 // Batch 0, Channel 2, Height (1) x Width (1)
3865 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00003866
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003867 // Batch 0, Channel 3, Height (1) x Width (1)
3868 4.0f,
3869
3870 // Batch 0, Channel 4, Height (1) x Width (1)
3871 5.0f,
3872
3873 // Batch 0, Channel 5, Height (1) x Width (1)
3874 6.0f,
3875
3876 // Batch 0, Channel 6, Height (1) x Width (1)
3877 7.0f,
3878
3879 // Batch 0, Channel 7, Height (1) x Width (1)
3880 8.0f,
3881
3882 // Batch 0, Channel 8, Height (1) x Width (1)
3883 9.0f,
3884
3885 // Batch 0, Channel 9, Height (1) x Width (1)
3886 10.0f
3887 };
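    // 1 / sqrt(1^2 + 2^2 + ... + 10^2) = 1 / sqrt(385) ~ 0.050964719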
telsoa014fcda012018-03-09 14:13:49 +00003888 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003889 std::vector<float> expectedOutputValues
3890 {
3891 // Batch 0, Channel 0, Height (1) x Width (1)
telsoa014fcda012018-03-09 14:13:49 +00003892 1.0f * approxInvL2Norm,
3893 2.0f * approxInvL2Norm,
3894 3.0f * approxInvL2Norm,
3895 4.0f * approxInvL2Norm,
3896 5.0f * approxInvL2Norm,
3897 6.0f * approxInvL2Norm,
3898 7.0f * approxInvL2Norm,
3899 8.0f * approxInvL2Norm,
3900 9.0f * approxInvL2Norm,
3901 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003902 };
telsoa014fcda012018-03-09 14:13:49 +00003903
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003904 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
3905 inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
telsoa014fcda012018-03-09 14:13:49 +00003906}
3907
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003908LayerTestResult<float, 4> L2Normalization1dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00003909{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003910 // Width: 1
3911 // Height: 1
3912 // Channels: 10
3913 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00003914
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003915 const armnn::TensorShape inputOutputShape{ 1, 1, 1, 10 };
3916 std::vector<float> inputValues
3917 {
3918 // Batch 0, Height 0, Width (1) x Channel (10)
3919 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f
3920 };
3921 const float approxInvL2Norm = 0.050964719f;
3922 std::vector<float> expectedOutputValues
3923 {
3924 // Batch 0, Height 0, Width (1) x Channel (10)
3925 1.0f * approxInvL2Norm,
3926 2.0f * approxInvL2Norm,
3927 3.0f * approxInvL2Norm,
3928 4.0f * approxInvL2Norm,
3929 5.0f * approxInvL2Norm,
3930 6.0f * approxInvL2Norm,
3931 7.0f * approxInvL2Norm,
3932 8.0f * approxInvL2Norm,
3933 9.0f * approxInvL2Norm,
3934 10.0f * approxInvL2Norm
3935 };
3936
3937 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
3938 inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00003939}
3940
3941LayerTestResult<float, 4> L2Normalization2dTest(armnn::IWorkloadFactory& workloadFactory)
3942{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003943 // Width: 5
3944 // Height: 1
3945 // Channels: 2
3946 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00003947
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003948 const armnn::TensorShape inputOutputShape{ 1, 2, 1, 5 };
3949 std::vector<float> inputValues
3950 {
3951 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00003952 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00003953
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003954 // Batch 0, Channel 1, Height (1) x Width (5)
3955 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
3956 };
3957 std::vector<float> expectedOutputValues
3958 {
3959 // Batch 0, Channel 0, Height (1) x Width (5)
3960 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
3961 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
3962 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
3963 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00003964 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
3965
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003966 // Batch 0, Channel 1, Height (1) x Width (5)
3967 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
3968 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
3969 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
3970 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00003971 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003972 };
telsoa014fcda012018-03-09 14:13:49 +00003973
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003974 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
3975 inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
3976}
telsoa014fcda012018-03-09 14:13:49 +00003977
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003978LayerTestResult<float, 4> L2Normalization2dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
3979{
3980 // Width: 5
3981 // Height: 1
3982 // Channels: 2
3983 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00003984
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003985 const armnn::TensorShape inputOutputShape{ 1, 1, 5, 2 };
3986 std::vector<float> inputValues
3987 {
3988 // Batch 0, Height 0, Width (5) x Channel (2)
3989 1.0f, 2.0f,
3990 3.0f, 4.0f,
3991 5.0f, 6.0f,
3992 7.0f, 8.0f,
3993 9.0f, 10.0f
3994 };
3995 std::vector<float> expectedOutputValues
3996 {
3997 // Batch 0, Height 0, Width (5) x Channel (2)
3998 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
3999 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
4000 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
4001 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
4002 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
4003 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
4004 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
4005 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
4006 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
4007 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
4008 };
telsoa014fcda012018-03-09 14:13:49 +00004009
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004010 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4011 inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00004012}
4013
4014LayerTestResult<float, 4> L2Normalization3dTest(armnn::IWorkloadFactory& workloadFactory)
4015{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004016 // Width: 3
4017 // Height: 4
4018 // Channels: 2
4019 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00004020
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004021 const armnn::TensorShape inputOutputShape{ 1, 2, 4, 3 };
4022 std::vector<float> inputValues
4023 {
4024 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004025 119.0f, 21.0f, 150.0f,
4026 149.0f, 32.0f, 179.0f,
4027 15.0f, 227.0f, 141.0f,
4028 147.0f, 199.0f, 220.0f,
4029
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004030 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004031 110.0f, 140.0f, 73.0f,
4032 211.0f, 212.0f, 89.0f,
4033 24.0f, 138.0f, 188.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004034 162.0f, 12.0f, 161.0f
4035 };
4036 std::vector<float> expectedOutputValues
4037 {
4038 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004039 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4040 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4041 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4042 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4043 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4044 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4045 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4046 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4047 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4048 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4049 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
4050 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
4051
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004052 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004053 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4054 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4055 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4056 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4057 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4058 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4059 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4060 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4061 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4062 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4063 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004064 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
4065 };
telsoa014fcda012018-03-09 14:13:49 +00004066
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004067 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4068 inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
4069}
telsoa014fcda012018-03-09 14:13:49 +00004070
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004071LayerTestResult<float, 4> L2Normalization3dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
4072{
4073 // Width: 3
4074 // Height: 4
4075 // Channels: 2
4076 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00004077
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004078 const armnn::TensorShape inputOutputShape{ 1, 4, 3, 2 };
4079 std::vector<float> inputValues
4080 {
4081 // Batch 0, Height 0, Width (3) x Channel (2)
4082 119.0f, 110.0f,
4083 21.0f, 140.0f,
4084 150.0f, 73.0f,
telsoa014fcda012018-03-09 14:13:49 +00004085
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004086 // Batch 0, Height 1, Width (3) x Channel (2)
4087 149.0f, 211.0f,
4088 32.0f, 212.0f,
4089 179.0f, 89.0f,
telsoa014fcda012018-03-09 14:13:49 +00004090
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004091 // Batch 0, Height 2, Width (3) x Channel (2)
4092 15.0f, 24.0f,
4093 227.0f, 138.0f,
4094 141.0f, 188.0f,
telsoa014fcda012018-03-09 14:13:49 +00004095
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004096 // Batch 0, Height 3, Width (3) x Channel (2)
4097 147.0f, 162.0f,
4098 199.0f, 12.0f,
4099 220.0f, 161.0f
4100 };
4101 std::vector<float> expectedOutputValues
4102 {
4103 // Batch 0, Height 0, Width (3) x Channel (2)
4104 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4105 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4106 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4107 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4108 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4109 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4110
4111 // Batch 0, Height 1, Width (3) x Channel (2)
4112 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4113 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4114 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4115 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4116 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4117 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4118
4119 // Batch 0, Height 2, Width (3) x Channel (2)
4120 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4121 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4122 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4123 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4124 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4125 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4126
4127 // Batch 0, Height 3, Width (3) x Channel (2)
4128 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4129 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4130 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
4131 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
4132 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
4133 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
4134 };
4135
4136 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4137 inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00004138}
4139
4140LayerTestResult<float, 4> L2Normalization4dTest(armnn::IWorkloadFactory& workloadFactory)
4141{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004142 // Width: 3
4143 // Height: 4
4144 // Channels: 3
4145 // BatchSize: 2
telsoa014fcda012018-03-09 14:13:49 +00004146
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004147 const armnn::TensorShape inputOutputShape{ 2, 3, 4, 3 };
4148 std::vector<float> inputValues
4149 {
4150 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004151 235.0f, 46.0f, 178.0f,
4152 100.0f, 123.0f, 19.0f,
4153 172.0f, 74.0f, 250.0f,
4154 6.0f, 195.0f, 80.0f,
4155
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004156 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004157 113.0f, 95.0f, 202.0f,
4158 77.0f, 114.0f, 71.0f,
4159 122.0f, 246.0f, 166.0f,
4160 82.0f, 28.0f, 37.0f,
4161
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004162 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004163 56.0f, 170.0f, 162.0f,
4164 194.0f, 89.0f, 254.0f,
4165 12.0f, 209.0f, 200.0f,
4166 1.0f, 64.0f, 54.0f,
4167
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004168 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004169 67.0f, 90.0f, 49.0f,
4170 7.0f, 163.0f, 18.0f,
4171 25.0f, 117.0f, 103.0f,
4172 247.0f, 59.0f, 189.0f,
4173
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004174 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004175 239.0f, 104.0f, 199.0f,
4176 17.0f, 124.0f, 153.0f,
4177 222.0f, 217.0f, 75.0f,
4178 32.0f, 126.0f, 21.0f,
4179
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004180 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004181 97.0f, 145.0f, 215.0f,
4182 115.0f, 116.0f, 238.0f,
4183 226.0f, 16.0f, 132.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004184 92.0f, 125.0f, 88.0f
4185 };
4186 std::vector<float> expectedOutputValues
4187 {
4188 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004189 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4190 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4191 178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
4192 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4193 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4194 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4195 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4196 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4197 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4198 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4199 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4200 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4201
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004202 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004203 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4204 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4205 202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
4206 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4207 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4208 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4209 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4210 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4211 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4212 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4213 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4214 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4215
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004216 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004217 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4218 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4219 162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
4220 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4221 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4222 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4223 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4224 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4225 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4226 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4227 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4228 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4229
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004230 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004231 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4232 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4233 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4234 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4235 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4236 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4237 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4238 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4239 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4240 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4241 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4242 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
4243
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004244 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004245 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4246 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4247 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4248 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4249 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4250 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4251 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4252 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4253 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4254 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4255 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4256 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
4257
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004258 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004259 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4260 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4261 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4262 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4263 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4264 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4265 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4266 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4267 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4268 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4269 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004270 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
4271 };
telsoa014fcda012018-03-09 14:13:49 +00004272
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004273 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4274 inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
4275}
telsoa014fcda012018-03-09 14:13:49 +00004276
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004277LayerTestResult<float, 4> L2Normalization4dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
4278{
4279 // Width: 3
4280 // Height: 4
4281 // Channels: 3
4282 // BatchSize: 2
telsoa014fcda012018-03-09 14:13:49 +00004283
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004284 const armnn::TensorShape inputOutputShape{ 2, 4, 3, 3 };
4285 std::vector<float> inputValues
4286 {
4287 // Batch 0, Height 0, Width (3) x Channel (3)
4288 235.0f, 113.0f, 56.0f,
4289 46.0f, 95.0f, 170.0f,
4290 178.0f, 202.0f, 162.0f,
telsoa014fcda012018-03-09 14:13:49 +00004291
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004292 // Batch 0, Height 1, Width (3) x Channel (3)
4293 100.0f, 77.0f, 194.0f,
4294 123.0f, 114.0f, 89.0f,
4295 19.0f, 71.0f, 254.0f,
telsoa014fcda012018-03-09 14:13:49 +00004296
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004297 // Batch 0, Height 2, Width (3) x Channel (3)
4298 172.0f, 122.0f, 12.0f,
4299 74.0f, 246.0f, 209.0f,
4300 250.0f, 166.0f, 200.0f,
telsoa014fcda012018-03-09 14:13:49 +00004301
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004302 // Batch 0, Height 3, Width (3) x Channel (3)
4303 6.0f, 82.0f, 1.0f,
4304 195.0f, 28.0f, 64.0f,
4305 80.0f, 37.0f, 54.0f,
4306
4307 // Batch 1, Height 0, Width (3) x Channel (3)
4308 67.0f, 239.0f, 97.0f,
4309 90.0f, 104.0f, 145.0f,
4310 49.0f, 199.0f, 215.0f,
4311
4312 // Batch 1, Height 1, Width (3) x Channel (3)
4313 7.0f, 17.0f, 115.0f,
4314 163.0f, 124.0f, 116.0f,
4315 18.0f, 153.0f, 238.0f,
4316
4317 // Batch 1, Height 2, Width (3) x Channel (3)
4318 25.0f, 222.0f, 226.0f,
4319 117.0f, 217.0f, 16.0f,
4320 103.0f, 75.0f, 132.0f,
4321
4322 // Batch 1, Height 3, Width (3) x Channel (3)
4323 247.0f, 32.0f, 92.0f,
4324 59.0f, 126.0f, 125.0f,
4325 189.0f, 21.0f, 88.0f
4326 };
4327 std::vector<float> expectedOutputValues
4328 {
4329 // Batch 0, Height 0, Width (3) x Channel (3)
4330 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4331 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4332 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4333 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4334 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4335 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4336 178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
4337 202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
4338 162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
4339
4340 // Batch 0, Height 1, Width (3) x Channel (3)
4341 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4342 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4343 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4344 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4345 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4346 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4347 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4348 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4349 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4350
4351 // Batch 0, Height 2, Width (3) x Channel (3)
4352 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4353 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4354 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4355 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4356 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4357 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4358 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4359 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4360 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4361
4362 // Batch 0, Height 3, Width (3) x Channel (3)
4363 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4364 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4365 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4366 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4367 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4368 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4369 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4370 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4371 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4372
4373 // Batch 1, Height 0, Width (3) x Channel (3)
4374 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4375 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4376 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4377 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4378 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4379 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4380 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4381 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4382 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4383
4384 // Batch 1, Height 1, Width (3) x Channel (3)
4385 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4386 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4387 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4388 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4389 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4390 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4391 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4392 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4393 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4394
4395 // Batch 1, Height 2, Width (3) x Channel (3)
4396 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4397 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4398 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4399 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4400 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4401 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4402 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4403 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4404 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4405
4406 // Batch 1, Height 3, Width (3) x Channel (3)
4407 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4408 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4409 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4410 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4411 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4412 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4413 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
4414 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
4415 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
4416 };
4417
4418 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4419 inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00004420}
4421
4422template <typename T>
4423LayerTestResult<T, 4> ConstantTestImpl(armnn::IWorkloadFactory& workloadFactory,
4424 float qScale,
4425 int32_t qOffset)
4426{
4427 constexpr unsigned int inputWidth = 3;
4428 constexpr unsigned int inputHeight = 4;
4429 constexpr unsigned int inputChannels = 3;
4430 constexpr unsigned int inputBatchSize = 2;
4431
4432 constexpr unsigned int outputWidth = inputWidth;
4433 constexpr unsigned int outputHeight = inputHeight;
4434 constexpr unsigned int outputChannels = inputChannels;
4435 constexpr unsigned int outputBatchSize = inputBatchSize;
4436
4437 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
4438 armnn::GetDataType<T>());
4439
4440 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
4441 armnn::GetDataType<T>());
4442
4443 // Set quantization parameters if the requested type is a quantized type.
4444 if(armnn::IsQuantizedType<T>())
4445 {
4446 inputTensorInfo.SetQuantizationScale(qScale);
4447 inputTensorInfo.SetQuantizationOffset(qOffset);
4448 outputTensorInfo.SetQuantizationScale(qScale);
4449 outputTensorInfo.SetQuantizationOffset(qOffset);
4450 }
4451
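    // For quantized types the QuantizedVector helper below applies the usual affine quantization,
    // approximately q = round(x / qScale) + qOffset; ConstantTestUint8 passes qScale = 1.0f and
    // qOffset = 0, so the float values are carried over essentially unchanged.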
4452 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
4453 QuantizedVector<T>(qScale, qOffset, {
4454 // Batch 0, Channel 0
4455 235.0f, 46.0f, 178.0f,
4456 100.0f, 123.0f, 19.0f,
4457 172.0f, 74.0f, 250.0f,
4458 6.0f, 195.0f, 80.0f,
4459
4460 // Batch 0, Channel 1
4461 113.0f, 95.0f, 202.0f,
4462 77.0f, 114.0f, 71.0f,
4463 122.0f, 246.0f, 166.0f,
4464 82.0f, 28.0f, 37.0f,
4465
4466 // Batch 0, Channel 2
4467 56.0f, 170.0f, 162.0f,
4468 194.0f, 89.0f, 254.0f,
4469 12.0f, 209.0f, 200.0f,
4470 1.0f, 64.0f, 54.0f,
4471
4472 // Batch 1, Channel 0
4473 67.0f, 90.0f, 49.0f,
4474 7.0f, 163.0f, 18.0f,
4475 25.0f, 117.0f, 103.0f,
4476 247.0f, 59.0f, 189.0f,
4477
4478 // Batch 1, Channel 1
4479 239.0f, 104.0f, 199.0f,
4480 17.0f, 124.0f, 153.0f,
4481 222.0f, 217.0f, 75.0f,
4482 32.0f, 126.0f, 21.0f,
4483
4484 // Batch 1, Channel 2
4485 97.0f, 145.0f, 215.0f,
4486 115.0f, 116.0f, 238.0f,
4487 226.0f, 16.0f, 132.0f,
4488 92.0f, 125.0f, 88.0f,
4489 })));
4490
4491 LayerTestResult<T, 4> result(outputTensorInfo);
4492 result.outputExpected = input;
4493
4494 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4495
4496 armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
4497 AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
4498
4499 armnn::ConstantQueueDescriptor descriptor;
4500 descriptor.m_LayerOutput = &constantTensor;
4501
4502 armnn::WorkloadInfo info;
4503 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4504
4505 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
4506
4507 outputHandle->Allocate();
4508
surmeh013537c2c2018-05-18 16:31:43 +01004509 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00004510 workload->Execute();
4511
4512 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4513 return result;
4514}
4515
4516LayerTestResult<float, 4> ConstantTest(armnn::IWorkloadFactory& workloadFactory)
4517{
4518 return ConstantTestImpl<float>(workloadFactory, 0.0f, 0);
4519}
4520
4521LayerTestResult<uint8_t, 4> ConstantTestUint8(armnn::IWorkloadFactory& workloadFactory)
4522{
4523 return ConstantTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
4524}
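
// Note: with qScale = 1.0f and qOffset = 0 the quantization above is the identity mapping
// (235.0f becomes the uint8 value 235), so the constant workload is expected to reproduce
// the quantized input data unchanged.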

LayerTestResult<uint8_t, 3> MergerUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the merger operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; // Extent of the window is defined by the size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; // Extent of the window is defined by the size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
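
    // The view origins are given in output coordinates (channel, height, width): input1
    // (2 channels) is written starting at channel 0 and input2 (1 channel) starting at
    // channel 2, so the two views tile the 3-channel output exactly.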

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<uint8_t, 4> AdditionUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 1;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    const float scale = 7.0f;
    const int32_t offset = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    const unsigned int shape[] = { batchSize, channels, height, width };
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);

    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    // See dequantized values to the right.
    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
    {
        63, 35, 77, 70, 56, 112,  //  420, 224,  518,  469,  371, 763
        203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
    }));

    // See dequantized values to the right.
    auto input2 = MakeTensor<uint8_t, 4>(inputTensorInfo2, std::vector<uint8_t>(
    {
        21, 7, 175, 231, 175, 210, // 126,   28, 1204, 1596, 1204, 1449
        126, 161, 63, 21, 105, 126 // 861, 1106,  420,  126,  714,  861
    }));

    // See dequantized values to the right.
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>(
    {
        81, 39, 249, 255, 228, 255,   // 546,           252, 1722,          2065(clamped), 1575,          2212(clamped)
        255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281,          2408(clamped), 1477
    }));
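
    // Reference arithmetic for the first element: dequantizing with real = scale * (q - offset)
    // gives 7 * (63 - 3) = 420 and 7 * (21 - 3) = 126; the sum 546 requantizes to
    // 546 / 7 + 3 = 81. Sums above 7 * (255 - 3) = 1764 clamp to the uint8 maximum of 255.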

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

namespace
{
LayerTestResult<uint8_t, 4> MultiplicationUint8TestHelper(armnn::IWorkloadFactory& workloadFactory,
                                                          const unsigned int shape0[4],
                                                          const std::vector<uint8_t>& values0,
                                                          float scale0,
                                                          int32_t offset0,
                                                          const unsigned int shape1[4],
                                                          const std::vector<uint8_t>& values1,
                                                          float scale1,
                                                          int32_t offset1,
                                                          const unsigned int outShape[4],
                                                          const std::vector<uint8_t>& outValues,
                                                          float outScale,
                                                          int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1(4, shape1, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo outputTensorInfo(4, outShape, armnn::DataType::QuantisedAsymm8);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<uint8_t, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, values1);

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<uint8_t, 4> MultiplicationUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 1;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;
    const unsigned int shape[] = { batchSize, channels, height, width };

    // See dequantized values to the right.
    std::vector<uint8_t> input0({
        62, 37, 3, 172, 13, 111, // 244, 144,   8, 684, 48, 440,
        188, 20, 73, 31, 23, 31  // 748,  76, 288, 120, 88, 120
    });

    // See dequantized values to the right.
    std::vector<uint8_t> input1({
        126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
        48, 115, 151, 79, 78, 97      // 150, 351, 459, 243, 240, 297
    });

    // See dequantized values to the right.
    std::vector<uint8_t> output(
    {
        64, 72, 0, 255, 8, 236, //  93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
        77, 15, 92, 16, 10, 21, // 112200,  26676, 132192,         29160,          21120,  35640
    });

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape,
                                         input0,
                                         4.0f,
                                         1,
                                         shape,
                                         input1,
                                         3.0f,
                                         -2,
                                         shape,
                                         output,
                                         1366.255f, // Scale/offset chosen to have output values out of range.
                                         -5);
}
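
// Worked example for the first element above: input0 dequantizes as 4 * (62 - 1) = 244 and
// input1 as 3 * (126 - (-2)) = 384; their product is 244 * 384 = 93696, which requantizes to
// round(93696 / 1366.255) + (-5) = 69 - 5 = 64. Products outside the representable range
// clamp to 0 or 255, as flagged in the comments.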

LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<uint8_t> input0({
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12
    });

    std::vector<uint8_t> input1({ 2 });

    std::vector<uint8_t> output({
        2, 4, 6, 8, 10, 12,
        14, 16, 18, 20, 22, 24
    });

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape0,
                                         input0,
                                         1.0f,
                                         0,
                                         shape1,
                                         input1,
                                         1.0f,
                                         0,
                                         shape0,
                                         output,
                                         1.0f,
                                         0);
}

LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 3 };

    std::vector<uint8_t> input0({
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12
    });

    std::vector<uint8_t> input1({ 1, 2, 3 });

    std::vector<uint8_t> output({
        1, 4, 9, 4, 10, 18,
        7, 16, 27, 10, 22, 36
    });

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape0,
                                         input0,
                                         1.0f,
                                         0,
                                         shape1,
                                         input1,
                                         1.0f,
                                         0,
                                         shape0,
                                         output,
                                         1.0f,
                                         0);
}
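
// In the 1D-vector case the { 1, 2, 3 } tensor of shape { 1, 1, 1, 3 } is broadcast along the
// width dimension: each row of input0 is multiplied element-wise by { 1, 2, 3 }, e.g. the
// second row { 4, 5, 6 } becomes { 4*1, 5*2, 6*3 } = { 4, 10, 18 }.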

namespace
{
template <typename T>
LayerTestResult<T, 4> SubtractionTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                            const unsigned int shape0[4],
                                            const std::vector<T>& values0,
                                            float scale0,
                                            int32_t offset0,
                                            const unsigned int shape1[4],
                                            const std::vector<T>& values1,
                                            float scale1,
                                            int32_t offset1,
                                            const unsigned int outShape[4],
                                            const std::vector<T>& outValues,
                                            float outScale,
                                            int32_t outOffset)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<uint8_t, 4> SubtractionUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 2 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 1, 2, 1, 2 });
    std::vector<uint8_t> output({ 3, 3, 5, 5 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 0.5f, 2,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}
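
// Worked example for the first element: 0.5 * (10 - 2) = 4.0 minus 1.0 * (1 - 0) = 1.0 gives
// 3.0, which requantizes with the output scale 1.0 and offset 0 to the expected value 3.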

LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 2 });
    std::vector<uint8_t> output({ 5, 6, 7, 8 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 0.5f, 2,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 3);
}

LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 1 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 2, 1 });
    std::vector<uint8_t> output({ 8, 11, 12, 15 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 2 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 1, -1, 0, 2 });
    std::vector<float> output({ 0, 3, 3, 2 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 10 });
    std::vector<float> output({ -9, -8, -7, -6 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 2 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 10, -5 });
    std::vector<float> output({ -9, 7, -7, 9 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.1567f);
    inputTensorInfo.SetQuantizationOffset(1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.1567f);
    outputTensorInfo.SetQuantizationOffset(1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 255,
        200, 250
    }));

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel, and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
    // the centre).
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(3.141592f);
    inputTensorInfo.SetQuantizationOffset(3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(3.141592f);
    outputTensorInfo.SetQuantizationOffset(3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3,
        3, 5
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 2;
    constexpr unsigned int outputHeight = 1;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, // 3.0, 4.5, 6.0
        5, 8, 13 // 9.0, 13.5, 21.0
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3 // 3.0, 5.25
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
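
// Worked example for ResizeBilinearMinUint8Test: the width scale factor is 3/2 = 1.5, so
// output x = 1 projects to input x = 1.5, halfway between the dequantized values 4.5 and 6.0,
// giving 5.25; requantizing (rounding half away from zero) yields
// round(5.25 / 1.5) + (-1) = 4 - 1 = 3, the second expected value.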

LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 3;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 5;
    constexpr unsigned int outputHeight = 3;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.010765f);
    inputTensorInfo.SetQuantizationOffset(7);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.010132f);
    outputTensorInfo.SetQuantizationOffset(-18);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        24, 228, // 0.183005, 2.379065,
        105, 128, // 1.05497, 1.302565
        230, 71 // 2.400595, 0.68896
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        0, 87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
        86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
        219, 151, 84, 50, 50 // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
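
// Worked example for the magnification case: the width scale factor is 2/5 = 0.4, so output
// x = 1 projects to input x = 0.4 and blends the first row's dequantized values:
// 0.183005 + 0.4 * (2.379065 - 0.183005) ~= 1.061429, which requantizes to
// round(1.061429 / 0.010132) + (-18) = 105 - 18 = 87, the second expected value.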

LayerTestResult<float, 4> BatchNormTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto ret = BatchNormTestImpl<float>(workloadFactory, 0.f, 0);
    return ret;
}

LayerTestResult<uint8_t, 4> BatchNormUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    auto ret = BatchNormTestImpl<uint8_t>(workloadFactory, 1.f/20.f, 50);
    return ret;
}

LayerTestResult<uint8_t, 4> ConstantUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<uint8_t>(workloadFactory, 2e-6f, 1);
}

LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation1dTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim2TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim2DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
                                                                 bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                                        bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<uint8_t>(workloadFactory, forceNoPadding, 3.0f, -5);
}

LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory& workloadFactory,
                                                                 bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<float>(workloadFactory, forceNoPadding);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                                        bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<uint8_t>(workloadFactory, forceNoPadding, 0.1f, 128);
}

LayerTestResult<float, 4> SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleAveragePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<float, 4> SimpleAveragePooling2dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleAveragePooling2dNhwcTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
                                                                            bool forceNoPadding)
{
    return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
}

LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return LargeTensorsAveragePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return LargeTensorsAveragePooling2dTestCommon<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<float, 4> SimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleL2Pooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride1TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride1TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride4TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride4TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize7Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize7TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize7TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize9Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize9TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize9TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AsymmetricNonSquarePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AsymmetricNonSquarePooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> ComparePooling2dTest(armnn::IWorkloadFactory& workloadFactory,
                                               armnn::IWorkloadFactory& refWorkloadFactory,
                                               armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<float>(workloadFactory, refWorkloadFactory, poolingType);
}

LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                      armnn::IWorkloadFactory& refWorkloadFactory,
                                                      armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<uint8_t>(workloadFactory, refWorkloadFactory, poolingType, 0.1f, 128);
}

LayerTestResult<float, 2> FullyConnectedLargeTest(armnn::IWorkloadFactory& workloadFactory,
                                                  bool transposeWeights)
{
    return FullyConnectedLargeTestCommon<float>(workloadFactory, transposeWeights);
}

LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, 1.0f, -5);
}

LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<uint8_t>(workloadFactory, 1.0f, -5);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> SimplePermuteFloat32Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimplePermuteFloat32TestCommon(workloadFactory);
}

LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimplePermuteUint8TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet1TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet2TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet3TestCommon(workloadFactory);
}

namespace
{
template <typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> MeanTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                             const unsigned int* inputShape,
                                             const std::vector<T>& inputData,
                                             const std::vector<unsigned int>& axis,
                                             bool keepDims,
                                             const unsigned int* outputShape,
                                             const std::vector<T>& outputData,
                                             float scale = 1.0f,
                                             int32_t offset = 0)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MeanQueueDescriptor data;
    data.m_Parameters.m_Axis = axis;
    data.m_Parameters.m_KeepDims = keepDims;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());

    return result;
}
} // anonymous namespace
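
// The helper above reduces the input over the listed axes (over all dimensions when the axis
// list is empty), optionally keeping the reduced dimensions with size 1. For example, in
// MeanUint8SimpleTest below the full reduction is (1 + 1 + 2 + 2 + 3 + 3) / 6 = 2.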

LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 3, 2 };
    const unsigned int outputShape[] = { 1 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2 });

    return MeanTestHelper<uint8_t, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
}

LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 2 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2, 2 });

    return MeanTestHelper<uint8_t, 4, 3>(workloadFactory, inputShape, input, { 2 }, false, outputShape, output);
}

LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2, 2 });

    return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, { 2 }, true, outputShape, output);
}

LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 1, 3, 1, 1 };

    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
    std::vector<uint8_t> output({ 1, 3, 5 });

    return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, { 0, 3 }, true, outputShape, output);
}

LayerTestResult<uint8_t, 1> MeanVtsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 2 };

    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                                 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 });
    std::vector<uint8_t> output({ 12, 13 });

    return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, inputShape, input, { 0, 1 }, false, outputShape,
                                         output, 0.8f, 5);
}
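
// In MeanVtsUint8Test the input and output share the same quantization parameters (scale 0.8,
// offset 5), so the expected values can be read off the raw data: the elements at index 0 of
// the last dimension are { 1, 3, 5, ..., 23 }, whose mean is 144 / 12 = 12.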

LayerTestResult<float, 1> MeanFloatSimpleTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 3, 2 };
    const unsigned int outputShape[] = { 1 };

    std::vector<float> input({ 1., 1., 2., 2., 3., 3. });
    std::vector<float> output({ 2. });

    return MeanTestHelper<float, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
}

LayerTestResult<float, 3> MeanFloatSimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 3, 1, 2 };

    std::vector<float> input({ 1., 2., 3., 4., 5., 6., 1., 2., 3., 4., 5., 6. });
    std::vector<float> output({ 1., 2., 3., 4., 5., 6. });

    return MeanTestHelper<float, 4, 3>(workloadFactory, inputShape, input, { 0 }, false, outputShape, output);
}

LayerTestResult<float, 4> MeanFloatKeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };

    std::vector<float> input({ 1., 1., 2., 2., 3., 3. });
    std::vector<float> output({ 2., 2. });

    return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, { 2 }, true, outputShape, output);
}

LayerTestResult<float, 4> MeanFloatMultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 1, 3, 1, 1 };

    std::vector<float> input({ 1., 2., 3., 4., 5., 6., 1., 2., 3., 4., 5., 6. });
    std::vector<float> output({ 1.5, 3.5, 5.5 });

    return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, { 0, 3 }, true, outputShape, output);
}

LayerTestResult<float, 1> MeanVtsFloat1Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 2 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f,
                               14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
    std::vector<float> output({ 12.0f, 13.0f });

    return MeanTestHelper<float, 3, 1>(workloadFactory, inputShape, input, { 0, 1 }, false, outputShape, output);
}

LayerTestResult<float, 3> MeanVtsFloat2Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 1, 3, 1 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f,
                               14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
    std::vector<float> output({ 10.5f, 12.5f, 14.5f });

    return MeanTestHelper<float, 3, 3>(workloadFactory, inputShape, input, { 0, 2 }, true, outputShape, output);
}

LayerTestResult<float, 4> AdditionAfterMaxPoolTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Create the initial tensor:
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<float>());
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<float>());

    boost::multi_array<float, 4> poolingInput = MakeTensor<float, 4>(poolingInputTensorInfo,
                                                                     {1, 2, 3,
                                                                      4, 5, 6,
                                                                      7, 8, 9
                                                                     });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool with poolSize = 1x1 and stride = 2x2.
    // Result =
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool workload.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    // Intermediate buffer used to read the max pooling result back.
    auto shape(GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);

    // Create an addition with another tensor of the same size. This second input is what a
    // Conv2d with a kernel of ones(2) and 1x1 stride would produce from the initial tensor:
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<float>());
    armnn::TensorInfo addOutputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<float>());

    boost::multi_array<float, 4> addInput = MakeTensor<float, 4>(addInputTensorInfo,
                                                                 {12, 16,
                                                                  24, 28,
                                                                 });

    // Expected output tensor after MaxPool and Addition.
    LayerTestResult<float, 4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
    {
        13, 19,
        31, 37
    }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor.
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    // Read the max pooling result back and feed it into the handle consumed by the addition workload.
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());
    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);

    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}
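
// Data flow check for AdditionAfterMaxPoolTest: the 1x1 max pooling with 2x2 stride samples
// the input at positions (0,0), (0,2), (2,0) and (2,2), giving { 1, 3, 7, 9 }; adding
// { 12, 16, 24, 28 } element-wise yields the expected { 13, 19, 31, 37 }.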