//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "LayerTests.hpp"

#include "test/TensorHelpers.hpp"
#include "TensorCopyUtils.hpp"
#include "Permute.hpp"

#include <boost/test/unit_test.hpp>
#include <boost/assert.hpp>

#include <armnn/LayerSupport.hpp>

#include <backends/CpuTensorHandle.hpp>
#include <backends/WorkloadFactory.hpp>

#ifdef ARMCOMPUTECL_ENABLED
#include <backends/cl/ClTensorHandle.hpp>
#include <backends/aclCommon/ArmComputeTensorUtils.hpp>
#endif

#include <algorithm>
#include <boost/cast.hpp>

#include "WorkloadTestUtils.hpp"
#include "Conv2dTestImpl.hpp"
#include "BatchNormTestImpl.hpp"
#include "ActivationTestImpl.hpp"
#include "Pooling2dTestImpl.hpp"
#include "ReshapeTestImpl.hpp"
#include "FullyConnectedTestImpl.hpp"
#include "SplitterTestImpl.hpp"
#include "SoftmaxTestImpl.hpp"
#include "NormTestImpl.hpp"
#include "PermuteTestImpl.hpp"
#include "LstmTestImpl.hpp"
#include "ConvertFp16ToFp32TestImpl.hpp"
#include "ConvertFp32ToFp16TestImpl.hpp"

#include <backends/cl/test/ClContextControlFixture.hpp>

// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});

// 2-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias2({0, 2});

// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
template<typename T>
boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
{
    if (biasEnabled)
    {
        armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, armnn::GetDataType<T>());
        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
        return bias;
    }
    else
    {
        return boost::multi_array<T, 1>();
    }
}

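// Usage sketch (illustrative only): the uint8_t tests below reach this helper as
//     GetBias2<typename FullyConnectedBiasTypeForInputType<uint8_t>::Type>(true, 0.5f, 50);
// i.e. the bias values are run through QuantizedVector with the same scale/offset as
// the quantized input data (a pass-through for non-quantized bias types).
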
template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       float qScale,
                                                       int32_t qOffset,
                                                       bool biasEnabled)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x4 image.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset);
}

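// Note on the expected shape above: with stride 1 and no padding, a 3x5 kernel (width 3,
// height 5) over the 16x8 input gives width 16 - 3 + 1 = 14 and height 8 - 5 + 1 = 4,
// and the 2-element kernel batch produces the 2 output channels of {1, 2, 4, 14}.
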
template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       float qScale,
                                                       int32_t qOffset,
                                                       bool biasEnabled)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset);
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                           float qScale,
                                                           int32_t qOffset,
                                                           bool biasEnabled,
                                                           armnn::DataLayout dataLayout)
{
    // Use a single-batch 1-channel 3x4 image as input (NHWC layout).
    armnn::TensorInfo inputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
    {
        1, 5, 2, 3,
        8, 7, 3, 6,
        3, 3, 9, 1
    });

    // Use 1 batch of a 1-channel 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
    {
        4, 5, 6,
        0, 0, 0,
        3, 2, 1
    });

    // Expected output is 1 batch of a 1-channel 3x4 image.
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());

    const std::vector<float> outputData =
    {
        23, 41, 33, 21,
        44, 65, 76, 52,
        82, 85, 79, 42
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    return SimpleConvolution2dNhwcTestImpl<T>(workloadFactory,
                                              input,
                                              kernel,
                                              boost::multi_array<T, 1>(),
                                              expectedOutput,
                                              dataLayout,
                                              qScale,
                                              qOffset);
}

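// Note: in the NHWC test above the tensor shapes are laid out {N, H, W, C}, so
// {1, 3, 4, 1} describes a single-batch 1-channel 3x4 image rather than the
// {N, C, H, W} ordering used by the other convolution tests in this file.
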
LayerTestResult<float, 4> SimpleConvolution2d3x5Test(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled)
{
    return SimpleConvolution2d3x5TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled)
{
    return SimpleConvolution2d3x5TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3Test(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled)
{
    return SimpleConvolution2d3x3TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(armnn::IWorkloadFactory& workloadFactory,
                                                         bool biasEnabled)
{
    return SimpleConvolution2d3x3NhwcTestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled)
{
    return SimpleConvolution2d3x3TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

template<typename T>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11, 21, 31,
            12, 22, 32,
            13, 23, 33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11, -21,
            -12, -22,
        })));

// Expected output is 1 batch of a 1-channel 6x8 image.
// Manually calculated like this:
//[-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0 -12*0  -22*0 ..]
//[-11*0 -21*0  -12*0 -22*11 ; -11*0  -21*0  -12*11 -22*21 ; -11*0  -21*0  -12*21 -22*31 ; -11*0  -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0  ; -11*13 -21*23 -12*0  -22*0  ; -11*23 -21*33 -12*0  -22*0  ; -11*33 -21*0 -12*0  -22*0 ..]
//[-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0 -12*0  -22*0 ..]
//[..... .....  ..... .....  ; .....  .....  .....  .....  ; .....  .....  .....  .....  ; .....  ..... .....  ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            0, 0, 0, 0, 0, 0,
            -242, -594, -934, -372, 0, 0,
            -495, -1190, -1850, -725, 0, 0,
            -538, -1256, -1916, -748, 0, 0,
            -273, -626, -946, -363, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          1,  // Padding left.
                                          2,  // Padding top.
                                          3,  // Padding right.
                                          4); // Padding bottom.
}

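// Note on the shape above: with stride 1 the output size per dimension is
// inputSize + padBefore + padAfter - kernelSize + 1, so height = 3 + 2 + 4 - 2 + 1 = 8
// and width = 3 + 1 + 3 - 2 + 1 = 6, matching the {1, 1, 8, 6} output tensor.
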
template<typename T>
LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                     float qScale,
                                                                     int32_t qOffset)
{
    // Use a single-batch 1-channel 5x5 image as input.
    armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11, 21, 31, 41, 51,
            12, 22, 32, 42, 52,
            13, 23, 33, 43, 53,
            14, 24, 34, 44, 54,
            15, 25, 35, 45, 55,
        })));

    // Use 1 batch of a 1-channel 4x4 kernel.
    armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11, -21, -31, -41,
            -12, -22, -32, -42,
            -13, -23, -33, -43,
            -14, -24, -34, -44,
        })));

    // Expected output is 1 batch of a 1-channel 5x5 image.
    armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -7140, -10580, -13940, -9300, -5230,
            -9590, -14120, -18520, -12290, -6860,
            -9980, -14560, -18960, -12560, -7000,
            -7518, -10904, -14144, -9318, -5152,
            -5032, -7256, -9376, -6142, -3368,
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          1,  // Padding left.
                                          1,  // Padding top.
                                          2,  // Padding right.
                                          2); // Padding bottom.
}

template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                 float qScale,
                                                                 int32_t qOffset,
                                                                 bool biasEnabled)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the python tensorflow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dAsymmetricTestImpl<T>(workloadFactory,
                                                       input,
                                                       kernel,
                                                       GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                                       expectedOutput,
                                                       qScale,
                                                       qOffset,
                                                       1,  // Padding left.
                                                       1,  // Padding top.
                                                       2,  // Padding right.
                                                       2,  // Padding bottom.
                                                       1,  // Stride x.
                                                       1); // Stride y.
}

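// Note: with a depth multiplier of 1 the depthwise convolution above convolves each
// input channel with its own kernel slice, so the 2-channel input yields a 2-channel
// output; unlike a regular convolution, the per-channel results are never summed.
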
LayerTestResult<float, 4>
Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleConvolution2dAsymmetricPaddingTestCommon<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<float, 4> DepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled)
{
    return DepthwiseConvolution2dTestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(armnn::IWorkloadFactory& workloadFactory,
                                                              bool biasEnabled)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(armnn::IWorkloadFactory& workloadFactory,
                                                               bool biasEnabled)
{
    return DepthwiseConvolution2dAsymmetricTestCommon<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled)
{
    return DepthwiseConvolution2dTestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                                     bool biasEnabled)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

LayerTestResult<float, 4> Convolution1dTest(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
{
    return Convolution1dTestImpl<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> Convolution1dUint8Test(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
{
    return Convolution1dTestImpl<uint8_t>(workloadFactory, 0.1f, 128, biasEnabled);
}

LayerTestResult<float, 4> CompareConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                   armnn::IWorkloadFactory& refWorkloadFactory)
{
    return CompareConvolution2dTestImpl<float>(workloadFactory, refWorkloadFactory);
}

template<typename T>
LayerTestResult<T, 4> CompareDepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                        armnn::IWorkloadFactory& refWorkloadFactory)
{
    return CompareDepthwiseConvolution2dTestImpl<T>(workloadFactory, refWorkloadFactory);
}

template LayerTestResult<float, 4> CompareDepthwiseConvolution2dTest<float>(
    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&);
template LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dTest<uint8_t>(
    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&);

LayerTestResult<float, 4> SimpleNormalizationAcrossTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float, 4> SimpleNormalizationWithinTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float, 4> SimpleNormalizationAcrossNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationNhwcClNeonTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float, 2> SimpleSoftmaxTest(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    return SimpleSoftmaxTestImpl<float>(workloadFactory, beta);
}

LayerTestResult<uint8_t, 2> SimpleSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    return SimpleSoftmaxTestImpl<uint8_t>(workloadFactory, beta);
}

LayerTestResult<float, 4> CompareNormalizationTest(armnn::IWorkloadFactory& workloadFactory,
                                                   armnn::IWorkloadFactory& refWorkloadFactory,
                                                   armnn::NormalizationAlgorithmChannel normChannel,
                                                   armnn::NormalizationAlgorithmMethod normMethod)
{
    return CompareNormalizationTestImpl(workloadFactory, refWorkloadFactory, normChannel, normMethod);
}

LayerTestResult<float, 2> CompareSoftmaxTest(armnn::IWorkloadFactory& workloadFactory,
                                             armnn::IWorkloadFactory& refWorkloadFactory,
                                             float beta)
{
    return CompareSoftmaxTestImpl<float>(workloadFactory, refWorkloadFactory, beta);
}

LayerTestResult<uint8_t, 2> CompareSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                    armnn::IWorkloadFactory& refWorkloadFactory,
                                                    float beta)
{
    return CompareSoftmaxTestImpl<uint8_t>(workloadFactory, refWorkloadFactory, beta);
}

std::vector<LayerTestResult<float, 3>> SplitterTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SplitterTestCommon<float>(workloadFactory);
}

std::vector<LayerTestResult<uint8_t, 3>> SplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SplitterTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<float, 3> CopyViaSplitterTest(armnn::IWorkloadFactory& workloadFactory)
{
    return CopyViaSplitterTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return CopyViaSplitterTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({ 2, 2 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        { 2., 3., 3., 4. }));

    armnn::TensorInfo outputDesc({ 2, 4 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        { -0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
          -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f }));
    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        { 0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
          0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f }));

    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        { -0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
          -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
          -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
          0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
          -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
          0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
          0.02168f }));
    return LstmLayerFloat32NoCifgWithPeepholeWithProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({ 2, 2 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        { 2., 3., 3., 4. }));

    armnn::TensorInfo outputDesc({ 2, 4 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        { -0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
          -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f }));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
}

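// Note: the three LSTM variants above toggle the optional structural features of the
// ArmNN LSTM layer (CIFG, i.e. a coupled input-forget gate, peephole connections, and
// the output projection layer) and compare against pre-computed reference outputs.
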
LayerTestResult<float, 3> MergerTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float, 3> ret(outputTensorInfo);

    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,

            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,
        })
    );

    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
        {
            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; // Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}

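// A minimal sketch of how the view origins above partition the merged {3, 6, 3} output
// (using the {C, H, W} ordering from MergerTest):
//     output[c][h][w] = input1[c][h][w]      for c in [0, 2)  -- window1 origin {0, 0, 0}
//     output[c][h][w] = input2[c - 2][h][w]  for c == 2       -- window2 origin {2, 0, 0}
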
LayerTestResult<float, 4> AdditionTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 2;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
        {
            0.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            1.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 1.0f, 2.0f,

            0.0f, 0.0f, 1.0f,
            0.2f, 1.0f, 2.0f,
        }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
        {
            1.0f, 2.0f, 1.0f,
            0.0f, 1.0f, 2.0f,

            1.0f, 2.0f, -2.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 0.0f, -3.0f,

            0.0f, 0.0f, 1.0f,
            0.7f, 1.0f, 5.0f,
        }));

    LayerTestResult<float, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 4.0f, 2.0f,
            0.2f, 2.0f, 4.0f,

            2.0f, 4.0f, -1.0f,
            0.4f, 2.0f, 4.0f,

            0.0f, 4.0f, 2.0f,
            8.4f, 1.0f, -1.0f,

            0.0f, 0.0f, 2.0f,
            0.9f, 2.0f, 7.0f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template <typename T>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                float qScale,
                                                int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

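// Note on the broadcast above: adding {1, 3, 2, 1} to {1, 1, 2, 3} follows NumPy-style
// rules, with every size-1 dimension stretched to match the other operand, giving a
// {1, 3, 2, 3} result. For example output[0][0][1][2] = input1[0][0][1][0] +
// input2[0][0][1][2] = 1.0f + 5.5f = 6.5f.
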
template <typename T>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                        float qScale,
                                                        int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f, 1.0f, 2.0f,
            3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f,
            9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
            6.5f, 7.5f, 8.5f,
            9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<float, 4> AdditionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcastTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcastTestImpl<uint8_t>(workloadFactory, 2.f, 0);
}

LayerTestResult<float, 4> AdditionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcast1ElementTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcast1ElementTestImpl<uint8_t>(workloadFactory, 0.1333333f, 128);
}

LayerTestResult<float, 4> CompareAdditionTest(armnn::IWorkloadFactory& workloadFactory,
                                              armnn::IWorkloadFactory& refWorkloadFactory)
{
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();
    refWorkloadFactory.Finalize();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}

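// Note: the Compare* tests run the same workload on the backend under test and on a
// reference workload factory (typically the CPU reference backend), then store the
// reference result in outputExpected, so the two implementations are checked against
// each other rather than against hard-coded data.
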
namespace {
template <typename T>
LayerTestResult<T, 4> DivisionTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                         const unsigned int shape0[4],
                                         const std::vector<T>& values0,
                                         float scale0,
                                         int32_t offset0,
                                         const unsigned int shape1[4],
                                         const std::vector<T>& values1,
                                         float scale1,
                                         int32_t offset1,
                                         const unsigned int outShape[4],
                                         const std::vector<T>& outValues,
                                         float outScale,
                                         int32_t outOffset)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DivisionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<float, 4> DivisionByZeroTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<float> input0({
        1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
        -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });

    std::vector<float> input1({
        0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
        0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });

    std::vector<float> output({
        INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
        -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });

    return DivisionTestHelper<float>(workloadFactory,
                                     shape, input0, 1.0f, 0,
                                     shape, input1, 1.0f, 0,
                                     shape, output, 1.0f, 0);
}

LayerTestResult<float, 4> DivisionTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<float> input0({
        2, 2, 2, 2, 3, 3, 3, 3,
        4, 4, 4, 4, 5, 5, 5, 5 });

    std::vector<float> input1({
        1, 1, 1, 1, 2, 2, 2, 2,
        4, 4, 4, 4, 4, 4, 4, 4 });

    std::vector<float> output({
        2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
        1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });

    return DivisionTestHelper<float>(workloadFactory,
                                     shape, input0, 1.0f, 0,
                                     shape, input1, 1.0f, 0,
                                     shape, output, 1.0f, 0);
}

LayerTestResult<float, 4> DivisionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 2, 2, 2 };
    std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16 });

    unsigned int shape1[] = { 1, 1, 1, 1 };
    std::vector<float> input1({ 2 });

    std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8 });

    return DivisionTestHelper<float>(workloadFactory,
                                     shape0, input0, 1.0f, 0,
                                     shape1, input1, 1.0f, 0,
                                     shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 3, 3, 2 };
    std::vector<float> input0({
        1, 4, 3, 8, 5, 12,
        7, 16, 9, 20, 11, 24,
        13, 28, 15, 32, 17, 36 });

    unsigned int shape1[] = { 1, 1, 1, 2 };
    std::vector<float> input1({ 1, 2 });

    std::vector<float> output({
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12,
        13, 14, 15, 16, 17, 18 });

    return DivisionTestHelper<float>(workloadFactory,
                                     shape0, input0, 1.0f, 0,
                                     shape1, input1, 1.0f, 0,
                                     shape0, output, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> DivisionUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<uint8_t> input0({ 2, 2, 2, 2, 3, 3, 3, 3,
                                  4, 4, 4, 4, 5, 5, 5, 5 });

    std::vector<uint8_t> input1({ 1, 1, 1, 1, 2, 2, 2, 2,
                                  4, 4, 4, 4, 4, 4, 4, 4 });

    std::vector<uint8_t> output({ 8, 8, 8, 8, 6, 6, 6, 6,
                                  4, 4, 4, 4, 5, 5, 5, 5 });

    return DivisionTestHelper<uint8_t>(workloadFactory,
                                       shape, input0, 1.0f, 0,
                                       shape, input1, 1.0f, 0,
                                       shape, output, 0.25f, 0);
}

LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 2, 2, 2 };
    std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16 });

    unsigned int shape1[] = { 1, 1, 1, 1 };
    std::vector<uint8_t> input1({ 2 });

    std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8 });

    return DivisionTestHelper<uint8_t>(workloadFactory,
                                       shape0, input0, 1.0f, 0,
                                       shape1, input1, 1.0f, 0,
                                       shape0, output, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 3, 3, 2 };
    std::vector<uint8_t> input0({ 1, 4, 3, 8, 5, 12,
                                  7, 16, 9, 20, 11, 24,
                                  13, 28, 15, 32, 17, 36 });

    unsigned int shape1[] = { 1, 1, 1, 2 };
    std::vector<uint8_t> input1({ 1, 2 });

    std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6,
                                  7, 8, 9, 10, 11, 12,
                                  13, 14, 15, 16, 17, 18 });

    return DivisionTestHelper<uint8_t>(workloadFactory,
                                       shape0, input0, 1.0f, 0,
                                       shape1, input1, 1.0f, 0,
                                       shape0, output, 1.0f, 0);
}

1358namespace {
surmeh01bceff2f2018-03-29 16:29:27 +01001359LayerTestResult<float,4> MultiplicationTestHelper(armnn::IWorkloadFactory& workloadFactory,
1360 const unsigned int shape0[4],
1361 const std::vector<float> & values0,
1362 const unsigned int shape1[4],
1363 const std::vector<float> & values1,
1364 const unsigned int outShape[4],
1365 const std::vector<float> & outValues)
telsoa014fcda012018-03-09 14:13:49 +00001366{
surmeh01bceff2f2018-03-29 16:29:27 +01001367 const size_t dimensionCount = 4;
1368 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
1369 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
1370 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
telsoa014fcda012018-03-09 14:13:49 +00001371
surmeh01bceff2f2018-03-29 16:29:27 +01001372 auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
1373 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00001374
1375 LayerTestResult<float,4> ret(outputTensorInfo);
1376
1377 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1378 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1379 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1380
1381 armnn::MultiplicationQueueDescriptor data;
1382 armnn::WorkloadInfo info;
1383 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1384 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1385 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1386
1387 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
1388
1389 inputHandle0->Allocate();
1390 inputHandle1->Allocate();
1391 outputHandle->Allocate();
1392
1393 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1394 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1395
surmeh013537c2c2018-05-18 16:31:43 +01001396 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001397 workload->Execute();
1398
1399 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1400
surmeh01bceff2f2018-03-29 16:29:27 +01001401 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00001402 return ret;
1403}
surmeh01bceff2f2018-03-29 16:29:27 +01001404} // anonymous namespace
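
// The helper above follows the workload test pattern used throughout this
// file: build TensorInfos, create tensor handles from the factory, fill in a
// queue descriptor plus WorkloadInfo, create the workload, allocate and copy
// the inputs, Finalize() the factory, Execute(), then copy the result back.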

LayerTestResult<float,4> MultiplicationTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<float> input0({
        1, 1, 1, 1, 2, 2, 2, 2,
        3, 3, 3, 3, 4, 4, 4, 4 });

    std::vector<float> input1({
        2, 2, 2, 2, 3, 3, 3, 3,
        4, 4, 4, 4, 5, 5, 5, 5 });

    std::vector<float> output({
        2, 2, 2, 2, 6, 6, 6, 6,
        12, 12, 12, 12, 20, 20, 20, 20 });

    return MultiplicationTestHelper(workloadFactory,
                                    shape,
                                    input0,
                                    shape,
                                    input1,
                                    shape,
                                    output);
}

LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 2, 2, 2 };
    std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});

    unsigned int shape1[] = { 1, 1, 1, 1 };
    std::vector<float> input1({ 2 });

    std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});

    return MultiplicationTestHelper(workloadFactory,
                                    shape0,
                                    input0,
                                    shape1,
                                    input1,
                                    shape0,
                                    output);
}

LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 3, 3, 2 };
    std::vector<float> input0({
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12,
        13, 14, 15, 16, 17, 18});

    unsigned int shape1[] = { 1, 1, 1, 2 };
    std::vector<float> input1({ 1, 2 });

    std::vector<float> output({
        1, 4, 3, 8, 5, 12,
        7, 16, 9, 20, 11, 24,
        13, 28, 15, 32, 17, 36});

    return MultiplicationTestHelper(workloadFactory,
                                    shape0,
                                    input0,
                                    shape1,
                                    input1,
                                    shape0,
                                    output);
}
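
// In the broadcast-1D-vector cases above, the second input has shape
// { 1, 1, 1, 2 }, so its two values are tiled across the first input's
// innermost dimension: with { 1, 2 }, even-indexed elements are multiplied
// by 1 and odd-indexed elements by 2, which is why the expected output
// alternates between the original value and its double.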

LayerTestResult<float,4> CompareMultiplicationTest(armnn::IWorkloadFactory& workloadFactory,
                                                   armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();
    refWorkloadFactory.Finalize();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
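
// CompareMultiplicationTest runs the same random inputs through two factories
// (typically an accelerated backend and the reference backend) and stores the
// two outputs in LayerTestResult::output and ::outputExpected, so the caller's
// tolerance check compares backend behaviour rather than hand-computed values.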

LayerTestResult<float,4> CompareBatchNormTest(armnn::IWorkloadFactory& workloadFactory,
                                              armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();
    refWorkloadFactory.Finalize();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
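
// For reference, batch normalization computes the standard formula
//     out = gamma * (x - mean) / sqrt(variance + epsilon) + beta
// with epsilon set here via data.m_Parameters.m_Eps = 0.01f. The variance
// tensor is generated with a lower bound of 0.0f so the square root stays real.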

template<typename T>
void PermuteTensorData(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::PermutationVector& mappings,
        armnn::TensorInfo & inputTensorInfo,
        const T * inputData,
        std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // we expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    inputTensorInfo = outputTensorInfo;
}

armnn::OriginsDescriptor CreateMergerDescriptorForConcatenation(
        const std::vector<armnn::TensorInfo> & inputTensorInfos,
        unsigned int concatDim)
{
    std::vector<armnn::TensorShape> shapes;
    shapes.reserve(inputTensorInfos.size());
    for (const armnn::TensorInfo& it: inputTensorInfos)
    {
        shapes.push_back(it.GetShape());
    }

    return armnn::CreateMergerDescriptorForConcatenation(shapes.begin(),
                                                         shapes.end(),
                                                         concatDim);
}

//
// Concatenation is only supported for N and C dimensions for NCHW. In case of
// <4 dimensions we need to make sure that the concat dimension is at least
// the 3rd slowest iterating one.
//

bool NeedPermuteForConcat(
        const std::vector<armnn::TensorInfo> & inputTensorInfos,
        unsigned int concatDim)
{
    // See note above. Additionally we expect the input shapes to have the
    // same number of dimensions.
    unsigned int nDimensions = 0;

    // Determine the number of dimensions as well as sanity-check them
    // against test implementation issues.
    for (auto && tensorInfo : inputTensorInfos)
    {
        if (!nDimensions)
        {
            nDimensions = tensorInfo.GetShape().GetNumDimensions();
        }
        else
        {
            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                "Input shapes must have the same number of dimensions");
        }
    }

    return (nDimensions - concatDim) < 3;
}
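
// A quick sketch of the rule above (illustrative values, not used by any test):
// concatenating 2d tensors of shape { 2, 3 } along dimension 1 gives
// nDimensions - concatDim = 2 - 1 = 1 < 3, so a permute is required, whereas
// concatenating 4d tensors along dimension 0 gives 4 - 0 = 4 >= 3 and the
// concatenation can run directly.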

armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
{
    unsigned int numDims = inputShape.GetNumDimensions();
    if (numDims >= 3)
    {
        // Nothing to do if the inputShape has at least 3 dimensions.
        return inputShape;
    }

    std::vector<unsigned int> newDims(size_t(3), 1u);
    unsigned int expandedBy = 3 - numDims;
    for (unsigned int i = 0; i < numDims; ++i)
    {
        newDims[expandedBy + i] = inputShape[i];
    }
    return armnn::TensorShape(3u, &newDims[0]);
}
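
// For example (values for illustration only): a 1d shape { 5 } expands to
// { 1, 1, 5 } and a 2d shape { 2, 3 } expands to { 1, 2, 3 }; leading dummy
// dimensions of size 1 leave the element count and memory layout unchanged.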

void Generate3dPermuteVectorForConcat(
        unsigned int numDimensions,
        unsigned int & concatDim,
        std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
{
    BOOST_ASSERT_MSG(numDimensions <= 3,
        "Only dimensions 1, 2 and 3 are supported by this helper");

    unsigned int expandedBy = 3 - numDimensions;
    unsigned int expandedConcatAxis = concatDim + expandedBy;

    if (expandedConcatAxis == 2)
    {
        concatDim = 0;
        armnn::PermutationVector forwardPermutation({1, 2, 0});
        armnn::PermutationVector reversePermutation({2, 0, 1});
        permutations = std::make_pair(forwardPermutation, reversePermutation);
    }
    else if (expandedConcatAxis == 1)
    {
        concatDim = 0;
        armnn::PermutationVector forwardPermutation({2, 0, 1});
        armnn::PermutationVector reversePermutation({1, 2, 0});
        permutations = std::make_pair(forwardPermutation, reversePermutation);
    }
    else
    {
        BOOST_ASSERT(expandedConcatAxis == 0);
        concatDim = 0;
    }
}
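
// Worked example (illustrative): a 1d concat over shape { 3 } expands to
// { 1, 1, 3 }, so expandedConcatAxis == 2. The forward permutation {1, 2, 0}
// moves that axis to position 0, the concat then runs along dimension 0, and
// the reverse permutation {2, 0, 1} restores the original ordering; the two
// vectors compose to the identity.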

//
// Permutes the input tensors so we can do a supported concatenation.
// Tensors with fewer than 3 dimensions are treated as 3d by adding dummy
// dimensions of size 1 at the front. Finally this function computes what the
// output shape of the permuted concatenated tensor is going to be.
//
template <typename T>
void PermuteInputsForConcat(
        armnn::IWorkloadFactory& workloadFactory,
        std::vector<armnn::TensorInfo> & inputTensorInfos,
        std::vector<T *> & inputData,
        std::vector<std::vector<T>> & inputDataStorage,
        armnn::PermutationVector & permuteVector,
        unsigned int & concatDim,
        armnn::TensorInfo & outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);
            // Stores the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}

//
// This is the counterpart of PermuteInputsForConcat(...): it permutes the
// output of the concatenation back so we can check it against an expected
// output.
//
template <typename T>
void PermuteOutputForConcat(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::TensorInfo & tensorInfo,
        const armnn::PermutationVector & permuteVector,
        std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
        T * data)
{
    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
    if (data == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // we expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo resultTensorInfo = tensorInfo;
    std::vector<T> inputData(tensorInfo.GetNumElements());
    std::vector<T> outputData;

    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());

    PermuteTensorData<T>(workloadFactory,
                         permuteVector,
                         resultTensorInfo,
                         &inputData[0],
                         outputData);

    ::memcpy(data, &outputData[0], sizeof(T) * outputData.size());
}

template <typename T>
void Concatenate(armnn::IWorkloadFactory& workloadFactory,
                 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
                 std::initializer_list<T *> inputsOrig,
                 const armnn::TensorInfo& outputTensorInfoOrig,
                 T * output,
                 unsigned int concatDim)
{
    BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
    if (output == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // we expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::MergerQueueDescriptor queueDescriptor;

    // Saves a copy of the parameters which we might need to change.
    std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
    std::vector<T *> inputs = inputsOrig;
    armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;

    armnn::PermutationVector permuteVector{0, 1, 2};

    // Holds and automatically releases memory for the reshaped input data.
    std::vector<std::vector<T>> tmpInputDataStorage;

    const size_t inputCount = inputTensorInfos.size();

    bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);

    if (needPermuteForConcat)
    {
        //
        // We need to permute the inputs, because concatenation along
        // the requested axis is not supported.
        //
        PermuteInputsForConcat<T>(workloadFactory,
                                  inputTensorInfos,
                                  inputs,
                                  tmpInputDataStorage,
                                  permuteVector,
                                  concatDim,
                                  outputTensorInfo);
    }

    armnn::OriginsDescriptor viewsDescriptor = CreateMergerDescriptorForConcatenation(inputTensorInfos, concatDim);

    queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
    for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
    {
        queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
            viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
    }

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
    inputHandles.reserve(inputCount);

    const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
    for (unsigned int i = 0; i < inputCount; ++i)
    {
        const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];

        std::unique_ptr<armnn::ITensorHandle> inputHandle = subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo.GetShape(),
                queueDescriptor.m_ViewOrigins[i].m_Origin.data())
            : workloadFactory.CreateTensorHandle(inputTensorInfo);

        inputHandles.emplace_back(std::move(inputHandle));
    }

    armnn::WorkloadInfo workloadInfo;

    for (unsigned int i = 0; i < inputCount; ++i)
    {
        AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
    }

    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(queueDescriptor, workloadInfo);

    for (auto& inputHandle : inputHandles)
    {
        inputHandle->Allocate();
    }

    outputHandle->Allocate();

    unsigned int nextInputId = 0;
    for (auto& inputHandle : inputHandles)
    {
        CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
        ++nextInputId;
    }

    workloadFactory.Finalize();
    workload->Execute();

    if (needPermuteForConcat)
    {
        PermuteOutputForConcat<T>(workloadFactory,
                                  outputTensorInfo,
                                  permuteVector,
                                  std::move(outputHandle),
                                  output);
    }
    else
    {
        CopyDataFromITensorHandle(output, outputHandle.get());
    }
}

template <typename T>
LayerTestResult<T, 1> Concatenation1dTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 3 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
    auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
    auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));

    armnn::TensorInfo outputTensorInfo({ 9 }, armnn::GetDataType<T>());

    LayerTestResult<T, 1> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { inputTensorInfo, inputTensorInfo, inputTensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        0);

    result.output = MakeTensor<T, 1>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
    }));

    return result;
}

LayerTestResult<float, 1> Concatenation1dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation1dTestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 2> Concatenation2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    const float qScale,
    const int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,
    }));

    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        7.0f, 8.0f, 9.0f,

        // Batch 1
        16.0f, 17.0f, 18.0f,
    }));

    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { inputTensorInfo, inputTensorInfo, inputTensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        dimension);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    return result;
}

template <typename T>
LayerTestResult<T, 2> Concatenation2dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory,
    float qScale, int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());

    LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 0, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}

LayerTestResult<float, 2> Concatenation2dDim0Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 2> Concatenation2dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
    float qScale, int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());

    LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
    }));

    return result;
}

LayerTestResult<float, 2> Concatenation2dDim1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 3, 3 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,

        // Batch 2
        7.0f, 8.0f, 9.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 1, 3 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        16.0f, 17.0f, 18.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { input0TensorInfo, input1TensorInfo, input2TensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        0);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}

LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 5 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f, 7.0f, 8.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        9.0f,

        // Batch 1
        18.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { input0TensorInfo, input1TensorInfo, input2TensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        1);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
    }));

    return result;
}

LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dTestImpl(armnn::IWorkloadFactory& workloadFactory,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 1, Channel 0
        25.0f, 26.0f,

        // Batch 1, Channel 1
        27.0f, 28.0f,

        // Batch 1, Channel 2
        29.0f, 30.0f
    }));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f
    }));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { inputTensorInfo, inputTensorInfo, inputTensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        dimension);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 0,
                                                              qScale, qOffset);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));
    return result;
}

LayerTestResult<float, 3> Concatenation3dDim0Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
    float qScale, int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        13.0f, 14.0f,

        // Batch 0, Channel 7
        15.0f, 16.0f,

        // Batch 0, Channel 8
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 4
        27.0f, 28.0f,

        // Batch 1, Channel 5
        29.0f, 30.0f,

        // Batch 1, Channel 6
        31.0f, 32.0f,

        // Batch 1, Channel 7
        33.0f, 34.0f,

        // Batch 1, Channel 8
        35.0f, 36.0f
    }));

    return result;
}

LayerTestResult<float, 3> Concatenation3dDim1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim2TestImpl(armnn::IWorkloadFactory& workloadFactory,
    float qScale, int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 2, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
    }));

    return result;
}

LayerTestResult<float, 3> Concatenation3dDim2Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim2TestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        25.0f, 26.0f,

        // Batch 0, Channel 1
        27.0f, 28.0f,

        // Batch 0, Channel 2
        29.0f, 30.0f,

        // Batch 1, Channel 0
        13.0f, 14.0f,

        // Batch 1, Channel 1
        15.0f, 16.0f,

        // Batch 1, Channel 2
        17.0f, 18.0f,

        // Batch 2, Channel 0
        31.0f, 32.0f,

        // Batch 2, Channel 1
        33.0f, 34.0f,

        // Batch 2, Channel 2
        35.0f, 36.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { input0TensorInfo, input1TensorInfo, input2TensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        0);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}

LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 0, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 0
        27.0f, 28.0f,

        // Batch 1, Channel 1
        29.0f, 30.0f,

        // Batch 1, Channel 2
        13.0f, 14.0f,

        // Batch 1, Channel 3
        15.0f, 16.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { input0TensorInfo, input1TensorInfo, input2TensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        1);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        25.0f, 26.0f,

        // Batch 0, Channel 7
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        27.0f, 28.0f,

        // Batch 1, Channel 4
        29.0f, 30.0f,

        // Batch 1, Channel 5
        13.0f, 14.0f,

        // Batch 1, Channel 6
        15.0f, 16.0f,

        // Batch 1, Channel 7
        31.0f, 32.0f,
    }));

    return result;
}

LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f,

        // Batch 0, Channel 1
        9.0f,

        // Batch 0, Channel 2
        11.0f,

        // Batch 1, Channel 0
        25.0f,

        // Batch 1, Channel 1
        27.0f,

        // Batch 1, Channel 2
        29.0f
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f, 55.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { input0TensorInfo, input1TensorInfo, input2TensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        2);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
    }));

    return result;
}

LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<float, 4> ResizeBilinearNopTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                    const armnn::TensorShape& inputOutputTensorShape,
                                                    armnn::DataLayout dataLayout)
{
    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        1.0f, 2.0f, 3.0f, 4.0f,
        2.0f, 3.0f, 4.0f, 5.0f,
        3.0f, 4.0f, 5.0f, 6.0f,
        4.0f, 5.0f, 6.0f, 7.0f
    }));

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory)
{
    // BatchSize = 1, Channels = 1, Height = 4, Width = 4
    const armnn::TensorShape inputOutputShape{ 1, 1, 4, 4 };

    return ResizeBilinearNopTestImpl(workloadFactory, inputOutputShape, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> ResizeBilinearNopNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // BatchSize = 1, Height = 4, Width = 4, Channels = 1
    const armnn::TensorShape inputOutputShape{ 1, 4, 4, 1 };

    return ResizeBilinearNopTestImpl(workloadFactory, inputOutputShape, armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> SimpleResizeBilinearTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                       const armnn::TensorShape& inputTensorShape,
                                                       const armnn::TensorShape& outputTensorShape,
                                                       armnn::DataLayout dataLayout)
{
    const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        1.0f, 255.0f,
        200.0f, 250.0f
    }));

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
    // the centre).
    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
        1.0f
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
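
// Worked example for the 2x2 -> 1x1 case above: the horizontal and vertical
// scales are inputSize / outputSize = 2, the single output texel's top-left
// corner projects onto input coordinate (0, 0), and the interpolation weights
// all land on that sample, so the output is exactly input(0, 0) = 1.0f.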

LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 2
    const armnn::TensorShape inputShape{ 1, 1, 2, 2 };

    // outputShape: BatchSize = 1, Channels = 1, Height = 1, Width = 1
    const armnn::TensorShape outputShape{ 1, 1, 1, 1 };

    return SimpleResizeBilinearTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> SimpleResizeBilinearNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Height = 2, Width = 2, Channels = 1
    const armnn::TensorShape inputShape{ 1, 2, 2, 1 };

    // outputShape: BatchSize = 1, Height = 1, Width = 1, Channels = 1
    const armnn::TensorShape outputShape{ 1, 1, 1, 1 };

    return SimpleResizeBilinearTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
}
3031
3032LayerTestResult<float, 4> ResizeBilinearSqMinTestImpl(armnn::IWorkloadFactory& workloadFactory,
3033 const armnn::TensorShape& inputTensorShape,
3034 const armnn::TensorShape& outputTensorShape,
3035 armnn::DataLayout dataLayout)
3036{
3037 const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
3038 const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00003039
3040 auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
James Conroy074f3712018-10-03 09:32:03 +01003041 1.0f, 2.0f, 3.0f, 4.0f,
3042 2.0f, 3.0f, 4.0f, 5.0f,
3043 3.0f, 4.0f, 5.0f, 6.0f,
3044 4.0f, 5.0f, 6.0f, 7.0f
telsoa014fcda012018-03-09 14:13:49 +00003045 }));
3046
telsoa014fcda012018-03-09 14:13:49 +00003047 LayerTestResult<float, 4> result(outputTensorInfo);
3048 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
James Conroy074f3712018-10-03 09:32:03 +01003049 1.0f, 3.0f,
3050 3.0f, 5.0f
telsoa014fcda012018-03-09 14:13:49 +00003051 }));
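
    // Illustrative note (assuming the same corner-projection scheme as above): the scale factor is
    // 4 / 2 = 2 on both axes, so every output texel lands exactly on an input texel - output (i, j)
    // samples input (2i, 2j), giving { 1, 3; 3, 5 } with no interpolation at all.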

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Channels = 1, Height = 4, Width = 4
    const armnn::TensorShape inputShape{ 1, 1, 4, 4 };

    // outputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 2
    const armnn::TensorShape outputShape{ 1, 1, 2, 2 };

    return ResizeBilinearSqMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> ResizeBilinearSqMinNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Height = 4, Width = 4, Channels = 1
    const armnn::TensorShape inputShape{ 1, 4, 4, 1 };

    // outputShape: BatchSize = 1, Height = 2, Width = 2, Channels = 1
    const armnn::TensorShape outputShape{ 1, 2, 2, 1 };

    return ResizeBilinearSqMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> ResizeBilinearMinTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                    const armnn::TensorShape& inputTensorShape,
                                                    const armnn::TensorShape& outputTensorShape,
                                                    armnn::DataLayout dataLayout)
{
    const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
        13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
        144.0f, 233.0f, 377.0f, 610.0f, 987.0f
    }));

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
        1.0f, 2.6666f, 6.0f,
        78.5f, 179.3333f, 401.0f
    }));
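
    // Illustrative note (assuming the corner-projection scheme): the scale factors are 3 / 2 = 1.5
    // vertically and 5 / 3 ~= 1.6667 horizontally. For example, output (0, 1) projects to source
    // x = 1.6667, blending input values 2 and 3 as 2 + 0.6667 * (3 - 2) ~= 2.6666, and output (1, 0)
    // projects to source y = 1.5, blending 13 and 144 as 13 + 0.5 * (144 - 13) = 78.5.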

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 5
    const armnn::TensorShape inputShape{ 1, 1, 3, 5 };

    // outputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 3
    const armnn::TensorShape outputShape{ 1, 1, 2, 3 };

    return ResizeBilinearMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> ResizeBilinearMinNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Height = 3, Width = 5, Channels = 1
    const armnn::TensorShape inputShape{ 1, 3, 5, 1 };

    // outputShape: BatchSize = 1, Height = 2, Width = 3, Channels = 1
    const armnn::TensorShape outputShape{ 1, 2, 3, 1 };

    return ResizeBilinearMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> ResizeBilinearMagTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                    const armnn::TensorShape& inputTensorShape,
                                                    const armnn::TensorShape& outputTensorShape,
                                                    armnn::DataLayout dataLayout)
{
    const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        1.0f, 2.0f,
        13.0f, 21.0f,
        144.0f, 233.0f
    }));

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
        1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
        13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
        144.0f, 179.6f, 215.2f, 233.0f, 233.0f
    }));
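
    // Illustrative note (assuming the corner-projection scheme): magnifying 2 -> 5 columns gives a
    // horizontal scale of 2 / 5 = 0.4, so output x = 0..4 projects to source x = 0.0, 0.4, 0.8, 1.2
    // and 1.6. The first row therefore interpolates 1 and 2 as 1.0, 1.4 and 1.8, then clamps to the
    // rightmost input column (2.0) once the projected coordinate passes the last input texel.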

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 2
    const armnn::TensorShape inputShape{ 1, 1, 3, 2 };

    // outputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 5
    const armnn::TensorShape outputShape{ 1, 1, 3, 5 };

    return ResizeBilinearMagTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> ResizeBilinearMagNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Height = 3, Width = 2, Channels = 1
    const armnn::TensorShape inputShape{ 1, 3, 2, 1 };

    // outputShape: BatchSize = 1, Height = 3, Width = 5, Channels = 1
    const armnn::TensorShape outputShape{ 1, 3, 5, 1 };

    return ResizeBilinearMagTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
}

LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int width = 2;
    constexpr unsigned int height = 3;

    const armnn::TensorInfo tensorInfo({ height, width },
                                       armnn::DataType::Float32);
    auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
        -10.0f, -5.0f,
        0.0f, 5.0f,
        10.0f, 10.0f
    }));

    LayerTestResult<float, 2> ret(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);

    armnn::FakeQuantizationQueueDescriptor data;
    armnn::WorkloadInfo info;

    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
    float min = -10.f;
    float max = 10.f;

    data.m_Parameters.m_Min = min;
    data.m_Parameters.m_Max = max;
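
    // Illustrative note (a sketch of the mapping, not asserting the exact rounding the workload
    // uses): fake quantization linearly maps [min, max] = [-10, 10] onto the uint8 range [0, 255]
    // and clamps, so -10 lands at 0, 0 lands near the midpoint (128) and anything >= 10 saturates
    // at 255, as reflected in the expected output below.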

    armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
    armnn::FakeQuantizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
        0.0f, 63.0f,
        128.0f, 191.0f,
        255.0f, 255.0f
    }));
    return ret;
}

namespace
{

LayerTestResult<float, 4> L2NormalizationTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                  const armnn::TensorShape& inputOutputTensorShape,
                                                  const std::vector<float>& inputValues,
                                                  const std::vector<float>& expectedOutputValues,
                                                  armnn::DataLayout dataLayout)
{
    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);

    auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputValues));

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

float CalcInvL2Norm(std::initializer_list<float> elements)
{
    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
                                            [](float acc, float element) { return acc + element * element; });
    return 1.0f / sqrtf(reduction);
}
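
// Illustrative note: L2 normalization divides each element by the L2 norm taken across the channel
// dimension, i.e. out = x * CalcInvL2Norm({ the values sharing x's spatial position }). For example,
// CalcInvL2Norm({ 3.0f, 4.0f }) = 1 / sqrt(9 + 16) = 0.2.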

} // anonymous namespace

LayerTestResult<float, 4> L2Normalization1dTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Width: 1
    // Height: 1
    // Channels: 10
    // BatchSize: 1

    const armnn::TensorShape inputOutputShape{ 1, 10, 1, 1 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        1.0f,

        // Batch 0, Channel 1, Height (1) x Width (1)
        2.0f,

        // Batch 0, Channel 2, Height (1) x Width (1)
        3.0f,

        // Batch 0, Channel 3, Height (1) x Width (1)
        4.0f,

        // Batch 0, Channel 4, Height (1) x Width (1)
        5.0f,

        // Batch 0, Channel 5, Height (1) x Width (1)
        6.0f,

        // Batch 0, Channel 6, Height (1) x Width (1)
        7.0f,

        // Batch 0, Channel 7, Height (1) x Width (1)
        8.0f,

        // Batch 0, Channel 8, Height (1) x Width (1)
        9.0f,

        // Batch 0, Channel 9, Height (1) x Width (1)
        10.0f
    };
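    // Illustrative check: 1 / sqrt(1^2 + 2^2 + ... + 10^2) = 1 / sqrt(385) ~= 0.050964719.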
    const float approxInvL2Norm = 0.050964719f;
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        1.0f * approxInvL2Norm,
        2.0f * approxInvL2Norm,
        3.0f * approxInvL2Norm,
        4.0f * approxInvL2Norm,
        5.0f * approxInvL2Norm,
        6.0f * approxInvL2Norm,
        7.0f * approxInvL2Norm,
        8.0f * approxInvL2Norm,
        9.0f * approxInvL2Norm,
        10.0f * approxInvL2Norm
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> L2Normalization1dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
#ifdef ARMCOMPUTECL_ENABLED
    // Clear the CL cache before this test when using ACL.
    if (ClContextControlFixture::Instance())
    {
        ClContextControlFixture::Instance()->m_ClContextControl.ClearClCache();
    }
#endif

    // Width: 1
    // Height: 1
    // Channels: 10
    // BatchSize: 1

    const armnn::TensorShape inputOutputShape{ 1, 1, 1, 10 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (1) x Channel (10)
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f
    };
    const float approxInvL2Norm = 0.050964719f;
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (1) x Channel (10)
        1.0f * approxInvL2Norm,
        2.0f * approxInvL2Norm,
        3.0f * approxInvL2Norm,
        4.0f * approxInvL2Norm,
        5.0f * approxInvL2Norm,
        6.0f * approxInvL2Norm,
        7.0f * approxInvL2Norm,
        8.0f * approxInvL2Norm,
        9.0f * approxInvL2Norm,
        10.0f * approxInvL2Norm
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> L2Normalization2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Width: 5
    // Height: 1
    // Channels: 2
    // BatchSize: 1

    const armnn::TensorShape inputOutputShape{ 1, 2, 1, 5 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (5)
        1.0f, 3.0f, 5.0f, 7.0f, 9.0f,

        // Batch 0, Channel 1, Height (1) x Width (5)
        2.0f, 4.0f, 6.0f, 8.0f, 10.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (5)
        1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),

        // Batch 0, Channel 1, Height (1) x Width (5)
        2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> L2Normalization2dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Width: 5
    // Height: 1
    // Channels: 2
    // BatchSize: 1

    const armnn::TensorShape inputOutputShape{ 1, 1, 5, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (5) x Channel (2)
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (5) x Channel (2)
        1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> L2Normalization3dTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Width: 3
    // Height: 4
    // Channels: 2
    // BatchSize: 1

    const armnn::TensorShape inputOutputShape{ 1, 2, 4, 3 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f, 21.0f, 150.0f,
        149.0f, 32.0f, 179.0f,
        15.0f, 227.0f, 141.0f,
        147.0f, 199.0f, 220.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f, 140.0f, 73.0f,
        211.0f, 212.0f, 89.0f,
        24.0f, 138.0f, 188.0f,
        162.0f, 12.0f, 161.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
        150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
        179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
        15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
        73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
        89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
        24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
        161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> L2Normalization3dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Width: 3
    // Height: 4
    // Channels: 2
    // BatchSize: 1

    const armnn::TensorShape inputOutputShape{ 1, 4, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (3) x Channel (2)
        119.0f, 110.0f,
        21.0f, 140.0f,
        150.0f, 73.0f,

        // Batch 0, Height 1, Width (3) x Channel (2)
        149.0f, 211.0f,
        32.0f, 212.0f,
        179.0f, 89.0f,

        // Batch 0, Height 2, Width (3) x Channel (2)
        15.0f, 24.0f,
        227.0f, 138.0f,
        141.0f, 188.0f,

        // Batch 0, Height 3, Width (3) x Channel (2)
        147.0f, 162.0f,
        199.0f, 12.0f,
        220.0f, 161.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (3) x Channel (2)
        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
        140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
        150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
        73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),

        // Batch 0, Height 1, Width (3) x Channel (2)
        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
        212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
        179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
        89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),

        // Batch 0, Height 2, Width (3) x Channel (2)
        15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
        24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),

        // Batch 0, Height 3, Width (3) x Channel (2)
        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
        12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
        161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> L2Normalization4dTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Width: 3
    // Height: 4
    // Channels: 3
    // BatchSize: 2

    const armnn::TensorShape inputOutputShape{ 2, 3, 4, 3 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2, Height (4) x Width (3)
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0, Height (4) x Width (3)
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2, Height (4) x Width (3)
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 0, Channel 2, Height (4) x Width (3)
        56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 1, Channel 0, Height (4) x Width (3)
        67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),

        // Batch 1, Channel 2, Height (4) x Width (3)
        97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> L2Normalization4dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Width: 3
    // Height: 4
    // Channels: 3
    // BatchSize: 2

    const armnn::TensorShape inputOutputShape{ 2, 4, 3, 3 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (3) x Channel (3)
        235.0f, 113.0f, 56.0f,
        46.0f, 95.0f, 170.0f,
        178.0f, 202.0f, 162.0f,

        // Batch 0, Height 1, Width (3) x Channel (3)
        100.0f, 77.0f, 194.0f,
        123.0f, 114.0f, 89.0f,
        19.0f, 71.0f, 254.0f,

        // Batch 0, Height 2, Width (3) x Channel (3)
        172.0f, 122.0f, 12.0f,
        74.0f, 246.0f, 209.0f,
        250.0f, 166.0f, 200.0f,

        // Batch 0, Height 3, Width (3) x Channel (3)
        6.0f, 82.0f, 1.0f,
        195.0f, 28.0f, 64.0f,
        80.0f, 37.0f, 54.0f,

        // Batch 1, Height 0, Width (3) x Channel (3)
        67.0f, 239.0f, 97.0f,
        90.0f, 104.0f, 145.0f,
        49.0f, 199.0f, 215.0f,

        // Batch 1, Height 1, Width (3) x Channel (3)
        7.0f, 17.0f, 115.0f,
        163.0f, 124.0f, 116.0f,
        18.0f, 153.0f, 238.0f,

        // Batch 1, Height 2, Width (3) x Channel (3)
        25.0f, 222.0f, 226.0f,
        117.0f, 217.0f, 16.0f,
        103.0f, 75.0f, 132.0f,

        // Batch 1, Height 3, Width (3) x Channel (3)
        247.0f, 32.0f, 92.0f,
        59.0f, 126.0f, 125.0f,
        189.0f, 21.0f, 88.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (3) x Channel (3)
        235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),

        // Batch 0, Height 1, Width (3) x Channel (3)
        100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),

        // Batch 0, Height 2, Width (3) x Channel (3)
        172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),

        // Batch 0, Height 3, Width (3) x Channel (3)
        6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
        37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
        54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 1, Height 0, Width (3) x Channel (3)
        67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),

        // Batch 1, Height 1, Width (3) x Channel (3)
        7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),

        // Batch 1, Height 2, Width (3) x Channel (3)
        25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),

        // Batch 1, Height 3, Width (3) x Channel (3)
        247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
        21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
        88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
}

template <typename T>
LayerTestResult<T, 4> ConstantTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                       float qScale,
                                       int32_t qOffset)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::GetDataType<T>());

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f,
    })));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;

    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);

    outputHandle->Allocate();

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ConstantTest(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> ConstantTestUint8(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<uint8_t, 3> MergerUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the merger operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    }));

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; // Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; // Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
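
    // Illustrative note: the view origins place input1's two channels at channel offset 0 of the
    // output and input2's single channel at channel offset 2, so the merger effectively concatenates
    // the two inputs along the channel axis into the 3-channel output tensor.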

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<uint8_t, 4> AdditionUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 1;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    const float scale = 7.0f;
    const int32_t offset = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    const unsigned int shape[] = { batchSize, channels, height, width };
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);

    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
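
    // Illustrative note: with this scheme a stored value q dequantizes to (q - offset) * scale,
    // e.g. the first input element 63 represents (63 - 3) * 7.0f = 420, matching the comments below.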

    // See dequantized values to the right.
    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
    {
        63, 35, 77, 70, 56, 112,    //  420, 224,  518,  469,  371, 763
        203, 28, 252, 168, 245, 91  // 1400, 175, 1743, 1155, 1694, 616
    }));

    // See dequantized values to the right.
    auto input2 = MakeTensor<uint8_t, 4>(inputTensorInfo2, std::vector<uint8_t>(
    {
        21, 7, 175, 231, 175, 210,  // 126,   28, 1204, 1596, 1204, 1449
        126, 161, 63, 21, 105, 126  // 861, 1106,  420,  126,  714,  861
    }));

    // See dequantized values to the right.
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>(
    {
        81, 39, 249, 255, 228, 255,  // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
        255, 186, 255, 186, 255, 214 // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

namespace
{
LayerTestResult<uint8_t, 4> MultiplicationUint8TestHelper(armnn::IWorkloadFactory& workloadFactory,
                                                          const unsigned int shape0[4],
                                                          const std::vector<uint8_t>& values0,
                                                          float scale0,
                                                          int32_t offset0,
                                                          const unsigned int shape1[4],
                                                          const std::vector<uint8_t>& values1,
                                                          float scale1,
                                                          int32_t offset1,
                                                          const unsigned int outShape[4],
                                                          const std::vector<uint8_t>& outValues,
                                                          float outScale,
                                                          int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1(4, shape1, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo outputTensorInfo(4, outShape, armnn::DataType::QuantisedAsymm8);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<uint8_t, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, values1);

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<uint8_t, 4> MultiplicationUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 1;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;
    const unsigned int shape[] = { batchSize, channels, height, width };

    // See dequantized values to the right.
    std::vector<uint8_t> input0({
        62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
        188, 20, 73, 31, 23, 31  // 748, 76, 288, 120, 88, 120
    });

    // See dequantized values to the right.
    std::vector<uint8_t> input1({
        126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
        48, 115, 151, 79, 78, 97      // 150, 351, 459, 243, 240, 297
    });

    // See dequantized values to the right.
    std::vector<uint8_t> output(
    {
        64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
        77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
    });
4316
4317 return MultiplicationUint8TestHelper(workloadFactory,
4318 shape,
4319 input0,
4320 4.0f,
4321 1,
4322 shape,
4323 input1,
4324 3.0f,
4325 -2,
4326 shape,
4327 output,
telsoa01c577f2c2018-08-31 09:22:23 +01004328 1366.255f, // Scale/offset chosen to have output values out of range.
surmeh01bceff2f2018-03-29 16:29:27 +01004329 -5);
4330}
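
// A quick sanity check of the quantised arithmetic above, which follows the usual
// asymmetric scheme real = scale * (quantised - offset). The helpers below are an
// illustrative sketch only - the 'Example' names are hypothetical and not part of
// the test suite or of the ArmNN API:
namespace
{
inline float ExampleDequantize(uint8_t quantised, float scale, int32_t offset)
{
    // real = scale * (quantised - offset)
    return scale * (static_cast<int32_t>(quantised) - offset);
}

inline uint8_t ExampleRequantize(float real, float scale, int32_t offset)
{
    // quantised = round(real / scale) + offset, clamped to the uint8 range.
    // (+0.5f rounds to nearest; the test data used here is non-negative.)
    int32_t quantised = static_cast<int32_t>(real / scale + 0.5f) + offset;
    return static_cast<uint8_t>(std::min(std::max(quantised, 0), 255));
}
} // anonymous namespace
// For the first element above: ExampleDequantize(62, 4.0f, 1) = 244 and
// ExampleDequantize(126, 3.0f, -2) = 384; 244 * 384 = 93696, and
// ExampleRequantize(93696.0f, 1366.255f, -5) = 64, the first expected output value.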

LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<uint8_t> input0({
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12
    });

    std::vector<uint8_t> input1({2});

    std::vector<uint8_t> output({
        2, 4, 6, 8, 10, 12,
        14, 16, 18, 20, 22, 24
    });

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape0,
                                         input0,
                                         1.0f,
                                         0,
                                         shape1,
                                         input1,
                                         1.0f,
                                         0,
                                         shape0,
                                         output,
                                         1.0f,
                                         0);
}

LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 3 };

    std::vector<uint8_t> input0({
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12
    });

    std::vector<uint8_t> input1({1, 2, 3});

    std::vector<uint8_t> output({
        1, 4, 9, 4, 10, 18,
        7, 16, 27, 10, 22, 36
    });

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape0,
                                         input0,
                                         1.0f,
                                         0,
                                         shape1,
                                         input1,
                                         1.0f,
                                         0,
                                         shape0,
                                         output,
                                         1.0f,
                                         0);
}
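
// The broadcast test above relies on the smaller tensor being (conceptually) tiled
// along every dimension whose extent is 1. A minimal reference sketch of that
// behaviour for the { 1, 1, 1, C } second input used here, assuming scale 1.0 and
// offset 0 so the quantised values can be multiplied directly (illustrative only;
// ExampleBroadcastMulInnermost is a hypothetical name):
namespace
{
inline std::vector<uint8_t> ExampleBroadcastMulInnermost(const std::vector<uint8_t>& input0,
                                                         const std::vector<uint8_t>& input1,
                                                         std::size_t innermostExtent)
{
    // input1 holds one value per element of the innermost dimension; it is reused
    // for every outer index of input0 (row-major layout assumed).
    std::vector<uint8_t> out(input0.size());
    for (std::size_t i = 0; i < input0.size(); ++i)
    {
        out[i] = static_cast<uint8_t>(input0[i] * input1[i % innermostExtent]);
    }
    return out;
}
} // anonymous namespace
// E.g. ExampleBroadcastMulInnermost(input0, {1, 2, 3}, 3) reproduces the expected
// output vector of the test above.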

namespace
{
template <typename T>
LayerTestResult<T, 4> SubtractionTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                            const unsigned int shape0[4],
                                            const std::vector<T>& values0,
                                            float scale0,
                                            int32_t offset0,
                                            const unsigned int shape1[4],
                                            const std::vector<T>& values1,
                                            float scale1,
                                            int32_t offset1,
                                            const unsigned int outShape[4],
                                            const std::vector<T>& outValues,
                                            float outScale,
                                            int32_t outOffset)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
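
// The helper above picks the armnn::DataType from the template parameter at compile
// time via std::is_same. The same dispatch could be factored into a small reusable
// trait; a sketch under that assumption (ExampleGetDataType is a hypothetical name,
// not part of the ArmNN API):
namespace
{
template <typename T>
constexpr armnn::DataType ExampleGetDataType()
{
    return std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8
                                           : armnn::DataType::Float32;
}
} // anonymous namespace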

LayerTestResult<uint8_t, 4> SubtractionUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 2 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 1, 2, 1, 2 });
    std::vector<uint8_t> output({ 3, 3, 5, 5 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 0.5f, 2,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}
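
// Working through the quantised arithmetic above: with real = scale * (quantised - offset),
// input0 dequantises to 0.5 * (10 - 2) = 4, then 5, 6, 7; input1 (scale 1, offset 0) is
// simply 1, 2, 1, 2. The real differences 3, 3, 5, 5 requantise unchanged because the
// output also uses scale 1 and offset 0.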

LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 2 });
    std::vector<uint8_t> output({ 5, 6, 7, 8 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 0.5f, 2,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 3);
}

LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 1 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 2, 1 });
    std::vector<uint8_t> output({ 8, 11, 12, 15 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 2 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 1, -1, 0, 2 });
    std::vector<float> output({ 0, 3, 3, 2 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 10 });
    std::vector<float> output({ -9, -8, -7, -6 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 2 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 10, -5 });
    std::vector<float> output({ -9, 7, -7, 9 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.1567f);
    inputTensorInfo.SetQuantizationOffset(1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.1567f);
    outputTensorInfo.SetQuantizationOffset(1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 255,
        200, 250
    }));

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if
    // projecting the centre). See the coordinate sketch after this function.
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
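
// A sketch of the coordinate mapping described in the comment inside the function
// above (illustrative only; ExampleResizeSourceCoord is a hypothetical name): the
// top-left corner of each output texel is projected with no half-pixel offset, so a
// 2x2 -> 1x1 resize samples the input at (0, 0) exactly.
namespace
{
inline float ExampleResizeSourceCoord(unsigned int outCoord, unsigned int inSize, unsigned int outSize)
{
    // inCoord = outCoord * (inSize / outSize); no +0.5 centre offset is applied.
    return static_cast<float>(outCoord) * (static_cast<float>(inSize) / static_cast<float>(outSize));
}
} // anonymous namespace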

LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(3.141592f);
    inputTensorInfo.SetQuantizationOffset(3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(3.141592f);
    outputTensorInfo.SetQuantizationOffset(3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3,
        3, 5
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 2;
    constexpr unsigned int outputHeight = 1;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, // 3.0, 4.5, 6.0
        5, 8, 13 // 9.0, 13.5, 21.0
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3 // 3.0, 5.25
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
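
// Following the same top-left-corner mapping for this 3x2 -> 2x1 case: the horizontal
// scale is 3 / 2 = 1.5, so output column 0 samples input x = 0.0 (dequantised value 3.0)
// and output column 1 samples input x = 1.5, halfway between 4.5 and 6.0, which
// interpolates to 5.25 - the two dequantised values shown in the comments above.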

LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 3;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 5;
    constexpr unsigned int outputHeight = 3;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.010765f);
    inputTensorInfo.SetQuantizationOffset(7);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.010132f);
    outputTensorInfo.SetQuantizationOffset(-18);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        24, 228,  // 0.183005, 2.379065,
        105, 128, // 1.05497, 1.302565
        230, 71   // 2.400595, 0.68896
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        0, 87, 173, 217, 217,  // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
        86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
        219, 151, 84, 50, 50   // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
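
// Unlike the preceding resize tests, this one uses different quantisation parameters
// for the input and the output, so every interpolated value is requantised. E.g. the
// first input value 24 dequantises to 0.010765 * (24 - 7) = 0.183005, and requantising
// with the output parameters gives round(0.183005 / 0.010132) + (-18) = 18 - 18 = 0,
// the first expected output value.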

LayerTestResult<float, 4> BatchNormTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto ret = BatchNormTestImpl<float>(workloadFactory, 0.f, 0);
    return ret;
}

LayerTestResult<uint8_t, 4> BatchNormUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    auto ret = BatchNormTestImpl<uint8_t>(workloadFactory, 1.f/20.f, 50);
    return ret;
}

LayerTestResult<uint8_t, 4> ConstantUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<uint8_t>(workloadFactory, 2e-6f, 1);
}

LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation1dTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim2TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim2DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
                                                                 bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                                        bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<uint8_t>(workloadFactory, forceNoPadding, 3.0f, -5);
}

LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory& workloadFactory,
                                                                 bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<float>(workloadFactory, forceNoPadding);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                                        bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<uint8_t>(workloadFactory, forceNoPadding, 0.1f, 128);
}

LayerTestResult<float, 4> SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleAveragePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<float, 4> SimpleAveragePooling2dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleAveragePooling2dNhwcTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory, 0.5, -1);
}

LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
                                                                            bool forceNoPadding)
{
    return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
}

LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return LargeTensorsAveragePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return LargeTensorsAveragePooling2dTestCommon<uint8_t>(workloadFactory, 0.5, -1);
}

LayerTestResult<float, 4> SimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleL2Pooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride1TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride1TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride4TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride4TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize7Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize7TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize7TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize9Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize9TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize9TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AsymmetricNonSquarePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AsymmetricNonSquarePooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> ComparePooling2dTest(armnn::IWorkloadFactory& workloadFactory,
                                               armnn::IWorkloadFactory& refWorkloadFactory,
                                               armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<float>(workloadFactory, refWorkloadFactory, poolingType);
}

LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                      armnn::IWorkloadFactory& refWorkloadFactory,
                                                      armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<uint8_t>(workloadFactory, refWorkloadFactory, poolingType, 0.1f, 128);
}

LayerTestResult<float, 2> FullyConnectedLargeTest(armnn::IWorkloadFactory& workloadFactory,
                                                  bool transposeWeights)
{
    return FullyConnectedLargeTestCommon<float>(workloadFactory, transposeWeights);
}

LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, 1.0f, -5);
}

LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<uint8_t>(workloadFactory, 1.0f, -5);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> SimplePermuteFloat32Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimplePermuteFloat32TestCommon(workloadFactory);
}

LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimplePermuteUint8TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet1TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet2TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet3TestCommon(workloadFactory);
}

namespace
{
template <typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> MeanTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                             const unsigned int* inputShape,
                                             const std::vector<T>& inputData,
                                             const std::vector<unsigned int>& axis,
                                             bool keepDims,
                                             const unsigned int* outputShape,
                                             const std::vector<T>& outputData,
                                             float scale = 1.0f,
                                             int32_t offset = 0)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MeanQueueDescriptor data;
    data.m_Parameters.m_Axis = axis;
    data.m_Parameters.m_KeepDims = keepDims;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 3, 2 };
    const unsigned int outputShape[] = { 1 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2 });

    return MeanTestHelper<uint8_t, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
}

LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 2 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2, 2 });

    return MeanTestHelper<uint8_t, 4, 3>(workloadFactory, inputShape, input, {2}, false, outputShape, output);
}

LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2, 2 });

    return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, {2}, true, outputShape, output);
}

LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 1, 3, 1, 1 };

    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
    std::vector<uint8_t> output({ 1, 3, 5 });

    return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, {0, 3}, true, outputShape, output);
}

LayerTestResult<uint8_t, 1> MeanVtsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 2 };

    std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
    std::vector<uint8_t> output({12, 13});

    return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, inputShape, input, {0, 1}, false, outputShape,
                                         output, 0.8f, 5);
}
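
// A note on the quantised mean: because the helper gives the input and the output the
// same scale and offset, averaging the quantised values directly matches dequantising,
// averaging, and requantising (the affine mapping commutes with the mean, up to
// rounding). E.g. reducing over axes {0, 1} here leaves the last dimension; its index-0
// elements are 1, 3, 5, ..., 23, whose mean is 12 - the first expected output value -
// independent of the 0.8 / 5 quantisation parameters.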

LayerTestResult<float, 1> MeanFloatSimpleTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 3, 2 };
    const unsigned int outputShape[] = { 1 };

    std::vector<float> input({ 1., 1., 2., 2., 3., 3. });
    std::vector<float> output({ 2. });

    return MeanTestHelper<float, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
}

LayerTestResult<float, 3> MeanFloatSimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 3, 1, 2 };

    std::vector<float> input({ 1., 2., 3., 4., 5., 6., 1., 2., 3., 4., 5., 6. });
    std::vector<float> output({ 1., 2., 3., 4., 5., 6. });

    return MeanTestHelper<float, 4, 3>(workloadFactory, inputShape, input, {0}, false, outputShape, output);
}

LayerTestResult<float, 4> MeanFloatKeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };

    std::vector<float> input({ 1., 1., 2., 2., 3., 3. });
    std::vector<float> output({ 2., 2. });

    return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, {2}, true, outputShape, output);
}

LayerTestResult<float, 4> MeanFloatMultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 1, 3, 1, 1 };

    std::vector<float> input({ 1., 2., 3., 4., 5., 6., 1., 2., 3., 4., 5., 6. });
    std::vector<float> output({ 1.5, 3.5, 5.5 });

    return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, {0, 3}, true, outputShape, output);
}

LayerTestResult<float, 1> MeanVtsFloat1Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 2 };

    std::vector<float> input({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
                              15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f});
    std::vector<float> output({12.0f, 13.0f});

    return MeanTestHelper<float, 3, 1>(workloadFactory, inputShape, input, {0, 1}, false, outputShape, output);
}

LayerTestResult<float, 3> MeanVtsFloat2Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 1, 3, 1 };

    std::vector<float> input({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
                              15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f});
    std::vector<float> output({10.5f, 12.5f, 14.5f});

    return MeanTestHelper<float, 3, 3>(workloadFactory, inputShape, input, {0, 2}, true, outputShape, output);
}