//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "LayerTests.hpp"

#include "test/TensorHelpers.hpp"
#include "TensorCopyUtils.hpp"
#include "Permute.hpp"

#include <boost/test/unit_test.hpp>
#include <boost/assert.hpp>

#include <armnn/LayerSupport.hpp>

#include <backends/CpuTensorHandle.hpp>
#include <backends/WorkloadFactory.hpp>

#ifdef ARMCOMPUTECL_ENABLED
#include <backends/cl/ClTensorHandle.hpp>
#include <backends/aclCommon/ArmComputeTensorUtils.hpp>
#endif

#include <algorithm>
#include <boost/cast.hpp>

#include "WorkloadTestUtils.hpp"
#include "Conv2dTestImpl.hpp"
#include "BatchNormTestImpl.hpp"
#include "ActivationTestImpl.hpp"
#include "Pooling2dTestImpl.hpp"
#include "ReshapeTestImpl.hpp"
#include "FullyConnectedTestImpl.hpp"
#include "SplitterTestImpl.hpp"
#include "SoftmaxTestImpl.hpp"
#include "NormTestImpl.hpp"
#include "PermuteTestImpl.hpp"
#include "LstmTestImpl.hpp"
#include "ConvertFp16ToFp32TestImpl.hpp"
#include "ConvertFp32ToFp16TestImpl.hpp"

#include <backends/cl/test/ClContextControlFixture.hpp>

// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});

// 2-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias2({0, 2});

// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
template<typename T>
boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
{
    if(biasEnabled)
    {
        armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, armnn::GetDataType<T>());
        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
        return bias;
    }
    else
    {
        return boost::multi_array<T, 1>();
    }
}
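
// Illustrative usage (assuming QuantizedVector passes float data through unchanged):
// GetBias2<float>(true, 1.0f, 0) yields the 1-D tensor {0, 2}, while GetBias2<float>(false, 1.0f, 0)
// yields an empty multi_array, which the conv test implementations below treat as "bias absent".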

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       float qScale,
                                                       int32_t qOffset,
                                                       bool biasEnabled)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x4 image.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));
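
    // Spot check for channel 0, top-left output: the 5x3 window covers input rows 0-4. Channel 0
    // contributes 0.5 on 12 taps (row 1 is all zeros), i.e. 6; channel 1 meets an all-zero kernel;
    // channel 2 contributes 15 taps of -1 * 2 = -30, giving 6 - 30 = -24 as expected.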

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
        input,
        kernel,
        GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset);
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       float qScale,
                                                       int32_t qOffset,
                                                       bool biasEnabled)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
        input,
        kernel,
        GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset);
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                           float qScale,
                                                           int32_t qOffset,
                                                           bool biasEnabled,
                                                           armnn::DataLayout dataLayout)
{
    // Use a single-batch 1-channel 3x4 image (NHWC descriptor {1, 3, 4, 1}).

    armnn::TensorInfo inputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
        {
            1, 5, 2, 3,
            8, 7, 3, 6,
            3, 3, 9, 1
        });

    // Use a single 1-channel 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
        4, 5, 6,
        0, 0, 0,
        3, 2, 1
    });

    // Expected output is a single-batch 1-channel 3x4 image.
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());

    const std::vector<float> outputData =
    {
        23, 41, 33, 21,
        44, 65, 76, 52,
        82, 85, 79, 42
    };
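
    // Spot check (assuming same-size output via 1-pixel zero padding): at position (0,0) only the
    // kernel's bottom row overlaps the image, giving 2*8 + 1*7 = 23; at (0,1) it gives
    // 3*8 + 2*7 + 1*3 = 41, matching the first two expected values.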

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    return SimpleConvolution2dNhwcTestImpl<T>(workloadFactory,
                                              input,
                                              kernel,
                                              boost::multi_array<T, 1>(),
                                              expectedOutput,
                                              dataLayout,
                                              qScale,
                                              qOffset);
}

LayerTestResult<float, 4> SimpleConvolution2d3x5Test(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled)
{
    return SimpleConvolution2d3x5TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled)
{
    return SimpleConvolution2d3x5TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled);
}
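
// Note on the uint8 variants: with qScale = 0.5 and qOffset = 50, QuantizedVector stores a real
// value v roughly as round(v / qScale) + qOffset (ArmNN's asymmetric scheme), so e.g. the expected
// conv output -24 is held as -24 / 0.5 + 50 = 2 in the quantized tensors.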

LayerTestResult<float, 4> SimpleConvolution2d3x3Test(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled)
{
    return SimpleConvolution2d3x3TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(armnn::IWorkloadFactory& workloadFactory,
                                                         bool biasEnabled)
{
    return SimpleConvolution2d3x3NhwcTestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled)
{
    return SimpleConvolution2d3x3TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

template<typename T>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11, 21, 31,
            12, 22, 32,
            13, 23, 33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11, -21,
            -12, -22,
        })));

    // Expected output is 1 batch of a 1-channel 6x8 image.
    // Manually calculated like this:
    // [-11*0 -21*0 -12*0 -22*0  ; -11*0 -21*0 -12*0 -22*0    ; -11*0 -21*0 -12*0 -22*0    ; -11*0 -21*0 -12*0 -22*0 ..]
    // [-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21  ; -11*0 -21*0 -12*21 -22*31  ; -11*0 -21*0 -12*31 -22*0 ..]
    // [-11*0 -21*11 -12*0 -22*12; -11*11 -21*21 -12*12 -22*22; -11*21 -21*31 -12*22 -22*32; -11*31 -21*0 -12*32 -22*0 ..]
    // [-11*0 -21*12 -12*0 -22*13; -11*12 -21*22 -12*13 -22*23; -11*22 -21*32 -12*23 -22*33; -11*32 -21*0 -12*33 -22*0 ..]
    // [-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0  ; -11*23 -21*33 -12*0 -22*0  ; -11*33 -21*0 -12*0 -22*0 ..]
    // [-11*0 -21*0 -12*0 -22*0  ; -11*0 -21*0 -12*0 -22*0    ; -11*0 -21*0 -12*0 -22*0    ; -11*0 -21*0 -12*0 -22*0 ..]
    // [..... ..... ..... .....  ; ..... ..... ..... .....    ; ..... ..... ..... .....    ; ..... ..... ..... ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            0, 0, 0, 0, 0, 0,
            -242, -594, -934, -372, 0, 0,
            -495, -1190, -1850, -725, 0, 0,
            -538, -1256, -1916, -748, 0, 0,
            -273, -626, -946, -363, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        })));
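
    // e.g. output(1,0): with 1 column of left padding and 2 rows of top padding, only input(0,0) = 11
    // lies under the kernel's bottom-right tap at that position, so the value is -22 * 11 = -242.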

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
        input,
        kernel,
        GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        1,  // Padding left.
        2,  // Padding top.
        3,  // Padding right.
        4); // Padding bottom.
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                     float qScale,
                                                                     int32_t qOffset)
{
    // Use a single-batch 1-channel 5x5 image as input.
    armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11, 21, 31, 41, 51,
            12, 22, 32, 42, 52,
            13, 23, 33, 43, 53,
            14, 24, 34, 44, 54,
            15, 25, 35, 45, 55,
        })));

    // Use 1 batch of a 1-channel 4x4 kernel.
    armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11, -21, -31, -41,
            -12, -22, -32, -42,
            -13, -23, -33, -43,
            -14, -24, -34, -44,
        })));

    // Expected output is 1 batch of a 1-channel 5x5 image.
    armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -7140, -10580, -13940, -9300, -5230,
            -9590, -14120, -18520, -12290, -6860,
            -9980, -14560, -18960, -12560, -7000,
            -7518, -10904, -14144, -9318, -5152,
            -5032, -7256, -9376, -6142, -3368,
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
        input,
        kernel,
        GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        1,  // Padding left.
        1,  // Padding top.
        2,  // Padding right.
        2); // Padding bottom.
}

template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                 float qScale,
                                                                 int32_t qOffset,
                                                                 bool biasEnabled)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the Python TensorFlow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));
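
    // Spot check for channel 0 at output (0,0): with 1 left / 1 top padding only the lower-right
    // 3x3 of the 4x4 kernel overlaps the image, giving
    // 27*0 + 26*1 + 25*2 + 23*5 + 22*6 + 21*7 + 19*10 + 18*11 + 17*12 = 1062.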

    return DepthwiseConvolution2dAsymmetricTestImpl<T>(workloadFactory,
        input,
        kernel,
        GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        1,  // Padding left.
        1,  // Padding top.
        2,  // Padding right.
        2,  // Padding bottom.
        1,  // strideX
        1); // strideY
}

template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                           float qScale,
                                                           int32_t qOffset,
                                                           bool biasEnabled)
{
    armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
             0, 25,
             1, 26,
             2, 27,
             3, 28,
             4, 29,

             5, 30,
             6, 31,
             7, 32,
             8, 33,
             9, 34,

            10, 35,
            11, 36,
            12, 37,
            13, 38,
            14, 39,

            15, 40,
            16, 41,
            17, 42,
            18, 43,
            19, 44,

            20, 45,
            21, 46,
            22, 47,
            23, 48,
            24, 49
        })));

    armnn::TensorInfo kernelTensorInfo({ 1, 4, 4, 2 }, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 16,
            31, 15,
            30, 14,
            29, 13,

            28, 12,
            27, 11,
            26, 10,
            25,  9,

            24,  8,
            23,  7,
            22,  6,
            21,  5,

            20,  4,
            19,  3,
            18,  2,
            17,  1
        })));

    armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1550,
            1580, 2284,
            1850, 2362,
            1530, 1955,
            1117, 1428,

            2140, 2910,
            3108, 4206,
            3500, 4342,
            2842, 3528,
            2042, 2536,

            3580, 3390,
            5068, 4886,
            5460, 5022,
            4342, 4068,
            3062, 2916,

            3618, 3566,
            5072, 5056,
            5390, 5182,
            4248, 4133,
            2971, 2922,

            3074, 3100,
            4282, 4352,
            4510, 4452,
            3533, 3517,
            2457, 2465
        })));
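
    // These are the same input, kernel, and expected values as DepthwiseConvolution2dAsymmetricTestCommon,
    // rearranged into NHWC order (the two channels interleaved per pixel).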

    return DepthwiseConvolution2dNhwcTestImpl<T>(workloadFactory,
        input,
        kernel,
        GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        1,  // Padding left.
        1,  // Padding top.
        2,  // Padding right.
        2,  // Padding bottom.
        1,  // strideX
        1); // strideY
}

LayerTestResult<float, 4>
Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleConvolution2dAsymmetricPaddingTestCommon<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<float, 4> DepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled)
{
    return DepthwiseConvolution2dTestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(armnn::IWorkloadFactory& workloadFactory,
                                                              bool biasEnabled)
{
    return DepthwiseConvolution2dNhwcTestCommon<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(armnn::IWorkloadFactory& workloadFactory,
                                                              bool biasEnabled)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(armnn::IWorkloadFactory& workloadFactory,
                                                               bool biasEnabled)
{
    return DepthwiseConvolution2dAsymmetricTestCommon<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled)
{
    return DepthwiseConvolution2dTestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                                     bool biasEnabled)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

LayerTestResult<float, 4> Convolution1dTest(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
{
    return Convolution1dTestImpl<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> Convolution1dUint8Test(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
{
    return Convolution1dTestImpl<uint8_t>(workloadFactory, 0.1f, 128, biasEnabled);
}

LayerTestResult<float,4> CompareConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                  armnn::IWorkloadFactory& refWorkloadFactory)
{
    return CompareConvolution2dTestImpl<float>(workloadFactory, refWorkloadFactory);
}

template<typename T>
LayerTestResult<T,4> CompareDepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                       armnn::IWorkloadFactory& refWorkloadFactory)
{
    return CompareDepthwiseConvolution2dTestImpl<T>(workloadFactory, refWorkloadFactory);
}

template LayerTestResult<float, 4> CompareDepthwiseConvolution2dTest<float>(
    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&);
template LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dTest<uint8_t>(
    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&);

LayerTestResult<float,4> SimpleNormalizationAcrossTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float,4> SimpleNormalizationWithinTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationNhwcClNeonTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float,2> SimpleSoftmaxTest(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    return SimpleSoftmaxTestImpl<float>(workloadFactory, beta);
}

LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    return SimpleSoftmaxTestImpl<uint8_t>(workloadFactory, beta);
}

LayerTestResult<float,4> CompareNormalizationTest(armnn::IWorkloadFactory& workloadFactory,
                                                  armnn::IWorkloadFactory& refWorkloadFactory,
                                                  armnn::NormalizationAlgorithmChannel normChannel,
                                                  armnn::NormalizationAlgorithmMethod normMethod)
{
    return CompareNormalizationTestImpl(workloadFactory, refWorkloadFactory, normChannel, normMethod);
}

LayerTestResult<float,2> CompareSoftmaxTest(armnn::IWorkloadFactory& workloadFactory,
                                            armnn::IWorkloadFactory& refWorkloadFactory,
                                            float beta)
{
    return CompareSoftmaxTestImpl<float>(workloadFactory, refWorkloadFactory, beta);
}

LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                   armnn::IWorkloadFactory& refWorkloadFactory,
                                                   float beta)
{
    return CompareSoftmaxTestImpl<uint8_t>(workloadFactory, refWorkloadFactory, beta);
}

std::vector<LayerTestResult<float,3>> SplitterTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SplitterTestCommon<float>(workloadFactory);
}

std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SplitterTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<float, 3> CopyViaSplitterTest(armnn::IWorkloadFactory& workloadFactory)
{
    return CopyViaSplitterTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return CopyViaSplitterTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({ 2, 2 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        { 2., 3., 3., 4. }));
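
    // Shapes are presumably { batchSize, inputSize } = { 2, 2 } for the input and
    // { batchSize, outputSize } = { 2, 4 } for the expected output.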

    armnn::TensorInfo outputDesc({ 2, 4 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
         -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
         0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
         -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
         -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
         0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
         -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
         0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
         0.02168f}));
    return LstmLayerFloat32NoCifgWithPeepholeWithProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({2, 2}, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        {2., 3., 3., 4.}));

    armnn::TensorInfo outputDesc({2, 4}, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
         -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float,3> MergerTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float,3> ret(outputTensorInfo);

    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,

            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,
        })
    );

    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
        {
            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; // Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
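
    // With {channels, height, width} ordering, input1 (2 channels) is written at channel offset 0
    // and input2 (1 channel) at channel offset 2, so the two views tile the 3-channel output exactly.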

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<float,4> AdditionTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 2;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
        {
            0.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            1.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 1.0f, 2.0f,

            0.0f, 0.0f, 1.0f,
            0.2f, 1.0f, 2.0f,
        }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
        {
            1.0f, 2.0f, 1.0f,
            0.0f, 1.0f, 2.0f,

            1.0f, 2.0f, -2.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 0.0f, -3.0f,

            0.0f, 0.0f, 1.0f,
            0.7f, 1.0f, 5.0f,
        }));

    LayerTestResult<float,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 4.0f, 2.0f,
            0.2f, 2.0f, 4.0f,

            2.0f, 4.0f, -1.0f,
            0.4f, 2.0f, 4.0f,

            0.0f, 4.0f, 2.0f,
            8.4f, 1.0f, -1.0f,

            0.0f, 0.0f, 2.0f,
            0.9f, 2.0f, 7.0f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template <typename T>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                float qScale,
                                                int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));
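
    // Broadcast semantics: input1 {1, 3, 2, 1} is stretched along width and input2 {1, 1, 2, 3}
    // along channels, so e.g. channel 0, row 1 of the output is 1.0f + {3.5f, 4.5f, 5.5f}
    // = {4.5f, 5.5f, 6.5f}.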

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template <typename T>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                        float qScale,
                                                        int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f, 1.0f, 2.0f,
            3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f,
            9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
            6.5f, 7.5f, 8.5f,
            9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<float, 4> AdditionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcastTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcastTestImpl<uint8_t>(workloadFactory, 2.f, 0);
}

LayerTestResult<float, 4> AdditionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcast1ElementTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcast1ElementTestImpl<uint8_t>(workloadFactory, 0.1333333f, 128);
}

LayerTestResult<float,4> CompareAdditionTest(armnn::IWorkloadFactory& workloadFactory,
                                             armnn::IWorkloadFactory& refWorkloadFactory)
{
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();
    refWorkloadFactory.Finalize();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}

namespace {
template <typename T>
LayerTestResult<T, 4> DivisionTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                         const unsigned int shape0[4],
                                         const std::vector<T>& values0,
                                         float scale0,
                                         int32_t offset0,
                                         const unsigned int shape1[4],
                                         const std::vector<T>& values1,
                                         float scale1,
                                         int32_t offset1,
                                         const unsigned int outShape[4],
                                         const std::vector<T>& outValues,
                                         float outScale,
                                         int32_t outOffset)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DivisionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<float,4> DivisionByZeroTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<float> input0({
         1.f,  1.f,  1.f,  1.f,  0.f, 0.f, 0.f, 0.f,
        -1.f, -1.f, -1.f, -1.f,  5.f, 5.f, 5.f, 5.f });

    std::vector<float> input1({
        0.f, 0.f, -0.f, -0.f,  0.f, 0.f, -0.f, -0.f,
        0.f, 0.f, -0.f, -0.f,  5.f, 5.f,  5.f,  5.f });

    std::vector<float> output({
         INFINITY,  INFINITY, -INFINITY, -INFINITY,  NAN, NAN, -NAN, -NAN,
        -INFINITY, -INFINITY,  INFINITY,  INFINITY,  1, 1, 1, 1 });
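
    // IEEE-754 float semantics: a nonzero value divided by ±0 yields ±infinity (the result's sign
    // is the XOR of the operand signs) and 0/0 yields NaN, so this covers each special case plus
    // the ordinary 5/5 = 1 lane.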
1341
David Beck5cd01f32018-09-12 16:00:08 +01001342 return DivisionTestHelper<float>(workloadFactory,
1343 shape, input0, 1.0f, 0,
1344 shape, input1, 1.0f, 0,
1345 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001346}

LayerTestResult<float,4> DivisionTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<float> input0({
        2, 2, 2, 2, 3, 3, 3, 3,
        4, 4, 4, 4, 5, 5, 5, 5 });

    std::vector<float> input1({
        1, 1, 1, 1, 2, 2, 2, 2,
        4, 4, 4, 4, 4, 4, 4, 4 });

    std::vector<float> output({
        2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
        1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });

    return DivisionTestHelper<float>(workloadFactory,
                                     shape, input0, 1.0f, 0,
                                     shape, input1, 1.0f, 0,
                                     shape, output, 1.0f, 0);
}

LayerTestResult<float, 4> DivisionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 2, 2, 2 };
    std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16 });

    unsigned int shape1[] = { 1, 1, 1, 1 };
    std::vector<float> input1({ 2 });

    std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8 });

    return DivisionTestHelper<float>(workloadFactory,
                                     shape0, input0, 1.0f, 0,
                                     shape1, input1, 1.0f, 0,
                                     shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 3, 3, 2 };
    std::vector<float> input0({
         1,  4,  3,  8,  5, 12,
         7, 16,  9, 20, 11, 24,
        13, 28, 15, 32, 17, 36 });

    unsigned int shape1[] = { 1, 1, 1, 2 };
    std::vector<float> input1({ 1, 2 });

    std::vector<float> output({
         1,  2,  3,  4,  5,  6,
         7,  8,  9, 10, 11, 12,
        13, 14, 15, 16, 17, 18 });

    return DivisionTestHelper<float>(workloadFactory,
                                     shape0, input0, 1.0f, 0,
                                     shape1, input1, 1.0f, 0,
                                     shape0, output, 1.0f, 0);
}

1416LayerTestResult<uint8_t,4> DivisionUint8Test(armnn::IWorkloadFactory& workloadFactory)
1417{
1418 const unsigned int width = 2;
1419 const unsigned int height = 2;
1420 const unsigned int channelCount = 2;
1421 const unsigned int batchSize = 2;
1422
1423 unsigned int shape[] = { batchSize, channelCount, height, width };
1424
1425 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
1426 4, 4, 4, 4, 5, 5, 5, 5 });
1427
1428 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
1429 4, 4, 4, 4, 4, 4, 4, 4 });
1430
1431 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
1432 4, 4, 4, 4, 5, 5, 5, 5});
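    // With input scales of 1.0 and an output scale of 0.25, a quantized output
    // value q represents q * 0.25: 2/1 = 2.0 is stored as 8, 3/2 = 1.5 as 6,
    // 4/4 = 1.0 as 4 and 5/4 = 1.25 as 5.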
1433
1434
1435 return DivisionTestHelper<uint8_t>(workloadFactory,
1436 shape, input0, 1.0f, 0,
1437 shape, input1, 1.0f, 0,
1438 shape, output, 0.25f, 0);
1439}
1440
1441LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
1442{
1443 unsigned int shape0[] = { 1, 2, 2, 2 };
1444 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1445
1446 unsigned int shape1[] = { 1, 1, 1, 1 };
1447 std::vector<uint8_t> input1({ 2 });
1448
1449 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1450
1451 return DivisionTestHelper<uint8_t>(workloadFactory,
1452 shape0, input0, 1.0f, 0,
1453 shape1, input1, 1.0f, 0,
1454 shape0, output, 1.0f, 0);
1455}
1456
1457LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
1458{
1459 unsigned int shape0[] = { 1, 3, 3, 2 };
1460 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
1461 7, 16, 9, 20, 11, 24,
1462 13, 28, 15, 32, 17, 36});
1463
1464 unsigned int shape1[] = { 1, 1, 1, 2 };
1465 std::vector<uint8_t> input1({ 1, 2 });
1466
1467 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
1468 7, 8, 9, 10, 11, 12,
1469 13, 14, 15, 16, 17, 18});
1470
1471 return DivisionTestHelper<uint8_t>(workloadFactory,
1472 shape0, input0, 1.0f, 0,
1473 shape1, input1, 1.0f, 0,
1474 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001475}
1476
1477namespace {
surmeh01bceff2f2018-03-29 16:29:27 +01001478LayerTestResult<float,4> MultiplicationTestHelper(armnn::IWorkloadFactory& workloadFactory,
1479 const unsigned int shape0[4],
1480 const std::vector<float> & values0,
1481 const unsigned int shape1[4],
1482 const std::vector<float> & values1,
1483 const unsigned int outShape[4],
1484 const std::vector<float> & outValues)
telsoa014fcda012018-03-09 14:13:49 +00001485{
surmeh01bceff2f2018-03-29 16:29:27 +01001486 const size_t dimensionCount = 4;
1487 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
1488 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
1489 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
telsoa014fcda012018-03-09 14:13:49 +00001490
surmeh01bceff2f2018-03-29 16:29:27 +01001491 auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
1492 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00001493
1494 LayerTestResult<float,4> ret(outputTensorInfo);
1495
1496 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1497 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1498 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1499
1500 armnn::MultiplicationQueueDescriptor data;
1501 armnn::WorkloadInfo info;
1502 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1503 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1504 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1505
1506 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
1507
1508 inputHandle0->Allocate();
1509 inputHandle1->Allocate();
1510 outputHandle->Allocate();
1511
1512 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1513 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1514
surmeh013537c2c2018-05-18 16:31:43 +01001515 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001516 workload->Execute();
1517
1518 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1519
surmeh01bceff2f2018-03-29 16:29:27 +01001520 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00001521 return ret;
1522}
surmeh01bceff2f2018-03-29 16:29:27 +01001523} // anonymous namespace
1524
1525
1526LayerTestResult<float,4> MultiplicationTest(armnn::IWorkloadFactory& workloadFactory)
1527{
1528 const unsigned int width = 2;
1529 const unsigned int height = 2;
1530 const unsigned int channelCount = 2;
1531 const unsigned int batchSize = 2;
1532
1533 unsigned int shape[] = { batchSize, channelCount, height, width };
1534
1535 std::vector<float> input0({
1536 1, 1, 1, 1, 2, 2, 2, 2,
1537 3, 3, 3, 3, 4, 4, 4, 4 });
1538
1539 std::vector<float> input1({
1540 2, 2, 2, 2, 3, 3, 3, 3,
1541 4, 4, 4, 4, 5, 5, 5, 5 });
1542
1543 std::vector<float> output({
1544 2, 2, 2, 2, 6, 6, 6, 6,
1545 12, 12, 12, 12, 20, 20, 20, 20 });
1546
1547 return MultiplicationTestHelper(workloadFactory,
1548 shape,
1549 input0,
1550 shape,
1551 input1,
1552 shape,
1553 output);
1554}
1555
1556LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
1557{
1558 unsigned int shape0[] = { 1, 2, 2, 2 };
1559 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
1560
1561 unsigned int shape1[] = { 1, 1, 1, 1 };
1562 std::vector<float> input1({ 2 });
1563
1564 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
1565
1566 return MultiplicationTestHelper(workloadFactory,
1567 shape0,
1568 input0,
1569 shape1,
1570 input1,
1571 shape0,
1572 output);
1573}
1574
1575LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
1576{
1577 unsigned int shape0[] = { 1, 3, 3, 2 };
1578 std::vector<float> input0({
1579 1, 2, 3, 4, 5, 6,
1580 7, 8, 9, 10, 11, 12,
1581 13, 14, 15, 16, 17, 18});
1582
1583 unsigned int shape1[] = { 1, 1, 1, 2 };
1584 std::vector<float> input1({ 1, 2 });
1585
1586 std::vector<float> output({
1587 1, 4, 3, 8, 5, 12,
1588 7, 16, 9, 20, 11, 24,
1589 13, 28, 15, 32, 17, 36});
1590
1591 return MultiplicationTestHelper(workloadFactory,
1592 shape0,
1593 input0,
1594 shape1,
1595 input1,
1596 shape0,
1597 output);
1598}
telsoa014fcda012018-03-09 14:13:49 +00001599
1600LayerTestResult<float,4> CompareMultiplicationTest(armnn::IWorkloadFactory& workloadFactory,
1601 armnn::IWorkloadFactory& refWorkloadFactory)
1602{
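    // Runs identical random inputs through the factory under test and the
    // reference factory; result.output holds the former and result.outputExpected
    // the latter, so the caller can compare the two backends.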
1603 const unsigned int width = 16;
1604 const unsigned int height = 32;
1605 const unsigned int channelCount = 2;
1606 const unsigned int batchSize = 5;
1607
1608 armnn::TensorInfo inputTensorInfo0;
1609 armnn::TensorInfo inputTensorInfo1;
1610 armnn::TensorInfo outputTensorInfo;
1611
1612 constexpr unsigned int shape[] = { batchSize, channelCount, height, width };
1613
1614 inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1615 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1616 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1617
1618 LayerTestResult<float,4> comparisonResult(outputTensorInfo);
1619
1620 auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
1621 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);
1622
1623 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1624 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1625 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1626
1627 std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
1628 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
1629 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1630
1631 armnn::MultiplicationQueueDescriptor data;
1632 armnn::WorkloadInfo info;
1633 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1634 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1635 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1636
1637 armnn::MultiplicationQueueDescriptor refData = data;
1638 armnn::WorkloadInfo refInfo = info;
1639 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
1640 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
1641 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1642
1643 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
1644 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);
1645
1646 inputHandle0->Allocate();
1647 inputHandle1->Allocate();
1648 outputHandle->Allocate();
1649 inputHandle0Ref->Allocate();
1650 inputHandle1Ref->Allocate();
1651 outputHandleRef->Allocate();
1652
1653 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1654 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1655 CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
1656 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
1657
surmeh013537c2c2018-05-18 16:31:43 +01001658 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001659 workload->Execute();
surmeh013537c2c2018-05-18 16:31:43 +01001660 refWorkloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001661 workloadRef->Execute();
1662
1663 CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
1664 CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
1665
1666 return comparisonResult;
1667}
1668
1669LayerTestResult<float,4> CompareBatchNormTest(armnn::IWorkloadFactory& workloadFactory,
1670 armnn::IWorkloadFactory& refWorkloadFactory)
1671{
1672 const unsigned int width = 2;
1673 const unsigned int height = 3;
1674 const unsigned int channels = 5;
1675 const unsigned int batchSize = 3;
1676
1677 armnn::TensorInfo inputTensorInfo;
1678 armnn::TensorInfo outputTensorInfo;
1679 armnn::TensorInfo tensorInfo;
1680
1681 constexpr unsigned int shape[] = {batchSize, channels, height, width};
1682 constexpr unsigned int tensorShape[] = {channels};
1683
1684 inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1685 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1686 tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);
1687
1688 auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);
1689
1690 auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
1691 auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
1692 auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
1693 auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);
1694
1695 LayerTestResult<float,4> ret(outputTensorInfo);
1696
1697 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
1698 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1699
1700 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
1701 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1702
1703 armnn::BatchNormalizationQueueDescriptor data;
1704 armnn::WorkloadInfo info;
1705 armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
1706 armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
1707 armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
1708 armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
1709
1710 AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
1711 AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
1712 AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
1713 AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
1714
1715 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1716 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1717 data.m_Mean = &meanTensor;
1718 data.m_Variance = &varianceTensor;
1719 data.m_Beta = &betaTensor;
1720 data.m_Gamma = &gammaTensor;
1721 data.m_Parameters.m_Eps = 0.01f;
1722
1723 armnn::BatchNormalizationQueueDescriptor refData = data;
1724 armnn::WorkloadInfo refInfo = info;
1725 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
1726 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1727
1728 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
1729 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);
1730
1731 inputHandle->Allocate();
1732 outputHandle->Allocate();
1733 inputHandleRef->Allocate();
1734 outputHandleRef->Allocate();
1735
1736 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
1737 CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
1738
surmeh013537c2c2018-05-18 16:31:43 +01001739 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001740 workload->Execute();
surmeh013537c2c2018-05-18 16:31:43 +01001741 refWorkloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001742 workloadRef->Execute();
1743
1744 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1745 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
1746
1747 return ret;
1748}
1749
surmeh013537c2c2018-05-18 16:31:43 +01001750template<typename T>
1751void PermuteTensorData(
1752 armnn::IWorkloadFactory& workloadFactory,
1753 const armnn::PermutationVector& mappings,
1754 armnn::TensorInfo & inputTensorInfo,
1755 const T * inputData,
1756 std::vector<T>& outputData)
telsoa014fcda012018-03-09 14:13:49 +00001757{
surmeh013537c2c2018-05-18 16:31:43 +01001758 BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
1759 if (inputData == nullptr)
1760 {
    // Nullptr is an error in the test. By returning without doing the permutation
    // I expect the caller to fail the test. It still makes sense to report this as
    // an assert for Debug builds.
1764 return;
1765 }
telsoa014fcda012018-03-09 14:13:49 +00001766
surmeh013537c2c2018-05-18 16:31:43 +01001767 armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
1768
1769 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
1770 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1771
1772 armnn::PermuteQueueDescriptor queueDescriptor;
1773 queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
1774 armnn::WorkloadInfo workloadInfo;
1775 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
1776 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
1777
1778 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
1779
1780 inputHandle->Allocate();
1781 outputHandle->Allocate();
1782
1783 CopyDataToITensorHandle(inputHandle.get(), inputData);
1784
1785 workload->Execute();
1786
1787 outputData.resize(outputTensorInfo.GetNumElements());
1788 CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
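    // Update the caller's tensor info in place so it describes the permuted data.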
1789 inputTensorInfo = outputTensorInfo;
1790}
1791
1792armnn::OriginsDescriptor CreateMergerDescriptorForConcatenation(
1793 const std::vector<armnn::TensorInfo> & inputTensorInfos,
1794 unsigned int concatDim)
1795{
telsoa014fcda012018-03-09 14:13:49 +00001796 std::vector<armnn::TensorShape> shapes;
1797 shapes.reserve(inputTensorInfos.size());
1798 for (const armnn::TensorInfo& it: inputTensorInfos)
1799 {
1800 shapes.push_back(it.GetShape());
1801 }
surmeh013537c2c2018-05-18 16:31:43 +01001802
1803 return armnn::CreateMergerDescriptorForConcatenation(shapes.begin(),
1804 shapes.end(),
1805 concatDim);
1806}
1807
1808//
1809// Concatenation is only supported for N and C dimensions for NCHW. In case of
// <4 dimensions we need to make sure that the concat dimension is at least
// the 3rd slowest iterating one.
1812//
1813
1814bool NeedPermuteForConcat(
1815 const std::vector<armnn::TensorInfo> & inputTensorInfos,
1816 unsigned int concatDim)
1817{
1818 // See note above. Additionally we expect the input shapes to have the
1819 // same number of dimensions.
1820 unsigned int nDimensions = 0;
1821
    // Determine the number of dimensions, and sanity-check them
    // against test implementation issues.
surmeh013537c2c2018-05-18 16:31:43 +01001824 for (auto && tensorInfo : inputTensorInfos)
1825 {
1826 if (!nDimensions)
1827 {
1828 nDimensions = tensorInfo.GetShape().GetNumDimensions();
1829 }
1830 else
1831 {
1832 BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
1833 "Input shapes must have the same number of dimensions");
1834 }
1835 }
1836
1837 return (nDimensions-concatDim) < 3;
1838}
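
// For example, concatenating 2d inputs along dimension 1 gives
// nDimensions - concatDim = 2 - 1 = 1 < 3, so a permutation is needed,
// whereas concatenating 4d NCHW inputs along N (4 - 0 = 4) or C (4 - 1 = 3)
// needs no permutation.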
1839
1840armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
1841{
1842 unsigned int numDims = inputShape.GetNumDimensions();
1843 if (numDims >= 3)
1844 {
1845 // Nothing to do if the inputShape has at least 3 dimensions.
1846 return inputShape;
1847 }
1848
1849 std::vector<unsigned int> newDims(size_t(3), 1u);
1850 unsigned int expandedBy = 3 - numDims;
1851 for (unsigned int i=0; i<numDims; ++i)
1852 {
1853 newDims[expandedBy+i] = inputShape[i];
1854 }
1855 return armnn::TensorShape(3u, &newDims[0]);
1856}
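
// For example, { 5 } expands to { 1, 1, 5 } and { 2, 3 } to { 1, 2, 3 };
// shapes that already have 3 or more dimensions are returned unchanged.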
1857
1858void Generate3dPermuteVectorForConcat(
1859 unsigned int numDimensions,
1860 unsigned int & concatDim,
1861 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
1862{
1863 BOOST_ASSERT_MSG(numDimensions <= 3,
1864 "Only dimensions 1,2 and 3 are supported by this helper");
1865
1866 unsigned int expandedBy = 3 - numDimensions;
1867 unsigned int expandedConcatAxis = concatDim + expandedBy;
1868
1869 if (expandedConcatAxis == 2)
1870 {
1871 concatDim = 0;
1872 armnn::PermutationVector forwardPermutation({1, 2, 0});
1873 armnn::PermutationVector reversePermutation({2, 0, 1});
1874 permutations = std::make_pair(forwardPermutation, reversePermutation);
1875 }
1876 else if (expandedConcatAxis == 1)
1877 {
1878 concatDim = 0;
1879 armnn::PermutationVector forwardPermutation({2, 0, 1});
1880 armnn::PermutationVector reversePermutation({1, 2, 0});
1881 permutations = std::make_pair(forwardPermutation, reversePermutation);
1882 }
1883 else
1884 {
1885 BOOST_ASSERT(expandedConcatAxis == 0);
1886 concatDim = 0;
1887 }
1888}
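
// For example, a 1d concatenation along dimension 0 lands on axis 2 of the
// expanded 3d shape; the forward permutation { 1, 2, 0 } moves that axis to
// position 0, where concatenation is supported, and the reverse permutation
// { 2, 0, 1 } restores the original layout afterwards.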
1889
1890//
// Permutes the input tensors so we can perform a supported concatenation.
// Tensors with fewer than 3 dimensions are treated as 3d by adding dummy
// dimensions of size 1 at the front. Finally, this function reports what
// the output shape of the permuted, concatenated tensor is going to be.
1895//
1896template <typename T>
1897void PermuteInputsForConcat(
1898 armnn::IWorkloadFactory& workloadFactory,
1899 std::vector<armnn::TensorInfo> & inputTensorInfos,
1900 std::vector<T *> & inputData,
1901 std::vector<std::vector<T>> & inputDataStorage,
1902 armnn::PermutationVector & permuteVector,
1903 unsigned int & concatDim,
1904 armnn::TensorInfo & outputTensorInfo)
1905{
1906 BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
1907 "Expecting more than one tensor to be concatenated here");
1908
1909 unsigned int numDims = 0;
1910 unsigned int nthInput = 0;
1911 const armnn::PermutationVector identity({0, 1, 2});
1912
1913 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
1914 std::make_pair(identity, identity);
1915
1916 inputDataStorage.resize(inputData.size());
1917
1918 for (auto && tensorInfo : inputTensorInfos)
1919 {
1920 if (numDims == 0)
1921 {
1922 numDims = tensorInfo.GetShape().GetNumDimensions();
1923 Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);
            // Store the reverse permutation.
surmeh013537c2c2018-05-18 16:31:43 +01001925 permuteVector = permutations.second;
1926 BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
1927 "Test logic error, we don't need permutation, so we shouldn't arrive here");
1928 }
1929 else
1930 {
1931 BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
1932 "All inputs must have the same number of dimensions");
1933 }
1934
1935 armnn::TensorInfo newTensorInfo = tensorInfo;
1936 newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));
1937
1938 PermuteTensorData<T>(workloadFactory,
1939 permutations.first,
1940 newTensorInfo,
1941 inputData[nthInput],
1942 inputDataStorage[nthInput]);
1943
1944 inputData[nthInput] = inputDataStorage[nthInput].data();
1945 inputTensorInfos[nthInput] = newTensorInfo;
1946
1947 ++nthInput;
1948 }
1949
1950 outputTensorInfo.SetShape(
1951 armnnUtils::Permuted(
1952 ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
1953 permutations.first));
1954}
1955
1956
1957//
// This is the counterpart of PermuteInputsForConcat(...): it permutes back
// the output of the concatenation so we can check it against an expected
// output.
1961//
1962template <typename T>
1963void PermuteOutputForConcat(
1964 armnn::IWorkloadFactory& workloadFactory,
1965 const armnn::TensorInfo & tensorInfo,
1966 const armnn::PermutationVector & permuteVector,
1967 std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
1968 T * data)
1969{
1970 BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
1971 if (data == nullptr)
1972 {
1973 // Nullptr is an error in the test. By returning without doing the permutation
1974 // I expect the caller to fail the test. It still makes sense to report this as
1975 // an assert for Debug builds.
1976 return;
1977 }
1978
1979 armnn::TensorInfo resultTensorInfo = tensorInfo;
1980 std::vector<T> inputData(tensorInfo.GetNumElements());
1981 std::vector<T> outputData;
1982
1983 CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
1984
1985 PermuteTensorData<T>(workloadFactory,
1986 permuteVector,
1987 resultTensorInfo,
1988 &inputData[0],
1989 outputData);
1990
1991 ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
1992}
1993
1994template <typename T>
1995void Concatenate(armnn::IWorkloadFactory& workloadFactory,
1996 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
1997 std::initializer_list<T *> inputsOrig,
1998 const armnn::TensorInfo& outputTensorInfoOrig,
1999 T * output,
2000 unsigned int concatDim)
2001{
2002 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
2003 if (output == nullptr)
2004 {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
2008 return;
2009 }
2010
2011 armnn::MergerQueueDescriptor queueDescriptor;
2012
telsoa01c577f2c2018-08-31 09:22:23 +01002013 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01002014 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
2015 std::vector<T *> inputs = inputsOrig;
2016 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
2017
2018 armnn::PermutationVector permuteVector{0, 1, 2};
2019
telsoa01c577f2c2018-08-31 09:22:23 +01002020 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01002021 std::vector<std::vector<T>> tmpInputDataStorage;
2022
2023 const size_t inputCount = inputTensorInfos.size();
2024
2025 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
2026
2027 if (needPermuteForConcat)
2028 {
2029 //
2030 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01002031 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01002032 //
2033 PermuteInputsForConcat<T>(workloadFactory,
2034 inputTensorInfos,
2035 inputs,
2036 tmpInputDataStorage,
2037 permuteVector,
2038 concatDim,
2039 outputTensorInfo);
2040 }
2041
2042 armnn::OriginsDescriptor viewsDescriptor = CreateMergerDescriptorForConcatenation(inputTensorInfos, concatDim);
telsoa014fcda012018-03-09 14:13:49 +00002043
2044 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
2045 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
2046 {
2047 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
2048 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
2049 }
2050
telsoa014fcda012018-03-09 14:13:49 +00002051 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2052
2053 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
2054 inputHandles.reserve(inputCount);
2055
2056 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
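    // Backends that support sub-tensors can write each input directly into its
    // view of the output buffer; otherwise a separate handle is created per input.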
2057 for (unsigned int i = 0; i < inputCount; ++i)
2058 {
surmeh013537c2c2018-05-18 16:31:43 +01002059 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
telsoa014fcda012018-03-09 14:13:49 +00002060
2061 std::unique_ptr<armnn::ITensorHandle> inputHandle = subTensorsSupported ?
2062 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo.GetShape(),
2063 queueDescriptor.m_ViewOrigins[i].m_Origin.data())
2064 : workloadFactory.CreateTensorHandle(inputTensorInfo);
2065
2066 inputHandles.emplace_back(std::move(inputHandle));
2067 }
2068
2069 armnn::WorkloadInfo workloadInfo;
2070
2071 for (unsigned int i = 0; i < inputCount; ++i)
2072 {
surmeh013537c2c2018-05-18 16:31:43 +01002073 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00002074 }
2075
2076 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
2077
2078 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(queueDescriptor, workloadInfo);
2079
2080 for (auto& inputHandle : inputHandles)
2081 {
2082 inputHandle->Allocate();
2083 }
2084
2085 outputHandle->Allocate();
2086
2087 unsigned int nextInputId = 0;
2088 for (auto& inputHandle : inputHandles)
2089 {
surmeh013537c2c2018-05-18 16:31:43 +01002090 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
2091 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00002092 }
2093
surmeh013537c2c2018-05-18 16:31:43 +01002094 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00002095 workload->Execute();
2096
surmeh013537c2c2018-05-18 16:31:43 +01002097 if (needPermuteForConcat)
2098 {
2099 PermuteOutputForConcat<T>(workloadFactory,
2100 outputTensorInfo,
2101 permuteVector,
2102 std::move(outputHandle),
2103 output);
2104 }
2105 else
2106 {
2107 CopyDataFromITensorHandle(output, outputHandle.get());
2108 }
telsoa014fcda012018-03-09 14:13:49 +00002109}
2110
2111template <typename T>
2112LayerTestResult<T, 1> Concatenation1dTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
2113{
2114 armnn::TensorInfo inputTensorInfo({ 3 }, armnn::GetDataType<T>());
2115
2116 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
2117 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
2118 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
2119
2120 armnn::TensorInfo outputTensorInfo({ 9 }, armnn::GetDataType<T>());
2121
2122 LayerTestResult<T, 1> result(outputTensorInfo);
2123
2124 std::vector<T> output;
2125 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002126 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002127 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2128 { input0.data(), input1.data(), input2.data() },
2129 outputTensorInfo,
2130 output.data(),
2131 0);
2132
2133 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
2134 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2135 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
2136 }));
2137
2138 return result;
2139}
2140
2141LayerTestResult<float, 1> Concatenation1dTest(armnn::IWorkloadFactory& workloadFactory)
2142{
2143 return Concatenation1dTestImpl<float>(workloadFactory, 0.0f, 0);
2144}
2145
2146template <typename T>
2147LayerTestResult<T, 2> Concatenation2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
2148 const armnn::TensorInfo& outputTensorInfo,
2149 unsigned int dimension,
2150 const float qScale,
2151 const int32_t qOffset)
2152{
2153 armnn::TensorInfo inputTensorInfo({ 2, 3 }, armnn::GetDataType<T>());
2154
2155 auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2156 // Batch 0
2157 1.0f, 2.0f, 3.0f,
2158
2159 // Batch 1
2160 10.0f, 11.0f, 12.0f,
2161 }));
2162
2163 auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2164 // Batch 0
2165 4.0f, 5.0f, 6.0f,
2166
2167 // Batch 1
2168 13.0f, 14.0f, 15.0f,
2169 }));
2170
2171 auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2172 // Batch 0
2173 7.0f, 8.0f, 9.0f,
2174
2175 // Batch 1
2176 16.0f, 17.0f, 18.0f,
2177 }));
2178
2179 LayerTestResult<T, 2> result(outputTensorInfo);
2180
2181 std::vector<T> output;
2182 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002183 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002184 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2185 { input0.data(), input1.data(), input2.data() },
2186 outputTensorInfo,
2187 output.data(),
2188 dimension);
2189
2190 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
2191 return result;
2192}
2193
2194template <typename T>
2195LayerTestResult<T, 2> Concatenation2dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory,
2196 float qScale, int32_t qOffset)
2197{
2198 armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
2199
2200 LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 0, qScale, qOffset);
2201 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2202 // Batch 0
2203 1.0f, 2.0f, 3.0f,
2204
2205 // Batch 1
2206 10.0f, 11.0f, 12.0f,
2207
2208 // Batch 2
2209 4.0f, 5.0f, 6.0f,
2210
2211 // Batch 3
2212 13.0f, 14.0f, 15.0f,
2213
2214 // Batch 4
2215 7.0f, 8.0f, 9.0f,
2216
2217 // Batch 5
2218 16.0f, 17.0f, 18.0f,
2219 }));
2220
2221 return result;
2222}
2223
2224LayerTestResult<float, 2> Concatenation2dDim0Test(armnn::IWorkloadFactory& workloadFactory)
2225{
2226 return Concatenation2dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
2227}
2228
2229template <typename T>
2230LayerTestResult<T, 2> Concatenation2dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
2231 float qScale, int32_t qOffset)
2232{
2233 armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
2234
2235 LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
2236 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2237 // Batch 0
2238 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
2239
2240 // Batch 1
2241 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
2242 }));
2243
2244 return result;
2245}
2246
2247LayerTestResult<float, 2> Concatenation2dDim1Test(armnn::IWorkloadFactory& workloadFactory)
2248{
2249 return Concatenation2dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
2250}
2251
2252template <typename T>
2253LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2254 int32_t qOffset)
2255{
2256 armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
2257 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2258 // Batch 0
2259 1.0f, 2.0f, 3.0f,
2260
2261 // Batch 1
2262 10.0f, 11.0f, 12.0f,
2263 }));
2264
2265 armnn::TensorInfo input1TensorInfo({ 3, 3 }, armnn::GetDataType<T>());
2266 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2267 // Batch 0
2268 4.0f, 5.0f, 6.0f,
2269
2270 // Batch 1
2271 13.0f, 14.0f, 15.0f,
2272
        // Batch 2
2274 7.0f, 8.0f, 9.0f,
2275 }));
2276
2277 armnn::TensorInfo input2TensorInfo({ 1, 3 }, armnn::GetDataType<T>());
2278 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
2280 16.0f, 17.0f, 18.0f,
2281 }));
2282
2283 armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
2284 LayerTestResult<T, 2> result(outputTensorInfo);
2285
2286 std::vector<T> output;
2287 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002288 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002289 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2290 { input0.data(), input1.data(), input2.data() },
2291 outputTensorInfo,
2292 output.data(),
2293 0);
2294
2295 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
2296 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2297 // Batch 0
2298 1.0f, 2.0f, 3.0f,
2299
2300 // Batch 1
2301 10.0f, 11.0f, 12.0f,
2302
2303 // Batch 2
2304 4.0f, 5.0f, 6.0f,
2305
2306 // Batch 3
2307 13.0f, 14.0f, 15.0f,
2308
2309 // Batch 4
2310 7.0f, 8.0f, 9.0f,
2311
2312 // Batch 5
2313 16.0f, 17.0f, 18.0f,
2314 }));
2315
2316 return result;
2317}
2318
2319LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2320{
2321 return Concatenation2dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2322}
2323
2324template <typename T>
2325LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2326 int32_t qOffset)
2327{
2328 armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
2329 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2330 // Batch 0
2331 1.0f, 2.0f, 3.0f,
2332
2333 // Batch 1
2334 10.0f, 11.0f, 12.0f,
2335 }));
2336
2337 armnn::TensorInfo input1TensorInfo({ 2, 5 }, armnn::GetDataType<T>());
2338 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2339 // Batch 0
2340 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
2341
2342 // Batch 1
2343 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
2344 }));
2345
2346 armnn::TensorInfo input2TensorInfo({ 2, 1 }, armnn::GetDataType<T>());
2347 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2348 // Batch 0
2349 9.0f,
2350
2351 // Batch 1
2352 18.0f
2353 }));
2354
2355 armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
2356 LayerTestResult<T, 2> result(outputTensorInfo);
2357
2358 std::vector<T> output;
2359 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002360 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002361 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2362 { input0.data(), input1.data(), input2.data() },
2363 outputTensorInfo,
2364 output.data(),
2365 1);
2366
2367 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
2368 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2369 // Batch 0
2370 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
2371
2372 // Batch 1
2373 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
2374 }));
2375
2376 return result;
2377}
2378
2379LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2380{
2381 return Concatenation2dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2382}
2383
2384template <typename T>
2385LayerTestResult<T, 3> Concatenation3dTestImpl(armnn::IWorkloadFactory& workloadFactory,
2386 const armnn::TensorInfo& outputTensorInfo,
2387 unsigned int dimension,
2388 float qScale,
2389 int32_t qOffset)
2390{
2391 armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2392
2393 auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2394 // Batch 0, Channel 0
2395 1.0f, 2.0f,
2396
2397 // Batch 0, Channel 1
2398 3.0f, 4.0f,
2399
2400 // Batch 0, Channel 2
2401 5.0f, 6.0f,
2402
2403 // Batch 1, Channel 0
2404 19.0f, 20.0f,
2405
2406 // Batch 1, Channel 1
2407 21.0f, 22.0f,
2408
2409 // Batch 1, Channel 2
2410 23.0f, 24.0f
2411 }));
2412
2413 auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2414 // Batch 0, Channel 0
2415 7.0f, 8.0f,
2416
2417 // Batch 0, Channel 1
2418 9.0f, 10.0f,
2419
2420 // Batch 0, Channel 2
2421 11.0f, 12.0f,
2422
2423 // Batch 1, Channel 0
2424 25.0f, 26.0f,
2425
2426 // Batch 1, Channel 1
2427 27.0f, 28.0f,
2428
2429 // Batch 1, Channel 2
2430 29.0f, 30.0f
2431 }));
2432
2433 auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2434 // Batch 0, Channel 0
2435 13.0f, 14.0f,
2436
2437 // Batch 0, Channel 1
2438 15.0f, 16.0f,
2439
2440 // Batch 0, Channel 2
2441 17.0f, 18.0f,
2442
2443 // Batch 1, Channel 0
2444 31.0f, 32.0f,
2445
2446 // Batch 1, Channel 1
2447 33.0f, 34.0f,
2448
2449 // Batch 1, Channel 2
2450 35.0f, 36.0f
2451 }));
2452
2453 LayerTestResult<T, 3> result(outputTensorInfo);
2454
2455 std::vector<T> output;
2456 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002457 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002458 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2459 { input0.data(), input1.data(), input2.data() },
2460 outputTensorInfo,
2461 output.data(),
2462 dimension);
2463
2464 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
2465 return result;
2466}
2467
2468template <typename T>
2469LayerTestResult<T, 3> Concatenation3dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2470 int32_t qOffset)
2471{
2472 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
2473
2474 LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 0,
2475 qScale, qOffset);
2476 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2477 // Batch 0, Channel 0
2478 1.0f, 2.0f,
2479
2480 // Batch 0, Channel 1
2481 3.0f, 4.0f,
2482
2483 // Batch 0, Channel 2
2484 5.0f, 6.0f,
2485
2486 // Batch 1, Channel 0
2487 19.0f, 20.0f,
2488
2489 // Batch 1, Channel 1
2490 21.0f, 22.0f,
2491
2492 // Batch 1, Channel 2
2493 23.0f, 24.0f,
2494
2495 // Batch 2, Channel 0
2496 7.0f, 8.0f,
2497
2498 // Batch 2, Channel 1
2499 9.0f, 10.0f,
2500
2501 // Batch 2, Channel 2
2502 11.0f, 12.0f,
2503
2504 // Batch 3, Channel 0
2505 25.0f, 26.0f,
2506
2507 // Batch 3, Channel 1
2508 27.0f, 28.0f,
2509
2510 // Batch 3, Channel 2
2511 29.0f, 30.0f,
2512
2513 // Batch 4, Channel 0
2514 13.0f, 14.0f,
2515
2516 // Batch 4, Channel 1
2517 15.0f, 16.0f,
2518
2519 // Batch 4, Channel 2
2520 17.0f, 18.0f,
2521
2522 // Batch 5, Channel 0
2523 31.0f, 32.0f,
2524
2525 // Batch 5, Channel 1
2526 33.0f, 34.0f,
2527
2528 // Batch 5, Channel 2
2529 35.0f, 36.0f
2530 }));
2531 return result;
2532}
2533
2534LayerTestResult<float, 3> Concatenation3dDim0Test(armnn::IWorkloadFactory& workloadFactory)
2535{
2536 return Concatenation3dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
2537}
2538
2539template <typename T>
2540LayerTestResult<T, 3> Concatenation3dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
2541 float qScale, int32_t qOffset)
2542{
2543 armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, armnn::GetDataType<T>());
2544
2545 LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
2546 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2547 // Batch 0, Channel 0
2548 1.0f, 2.0f,
2549
2550 // Batch 0, Channel 1
2551 3.0f, 4.0f,
2552
2553 // Batch 0, Channel 2
2554 5.0f, 6.0f,
2555
2556 // Batch 0, Channel 3
2557 7.0f, 8.0f,
2558
2559 // Batch 0, Channel 4
2560 9.0f, 10.0f,
2561
2562 // Batch 0, Channel 5
2563 11.0f, 12.0f,
2564
2565 // Batch 0, Channel 6
2566 13.0f, 14.0f,
2567
2568 // Batch 0, Channel 7
2569 15.0f, 16.0f,
2570
2571 // Batch 0, Channel 8
2572 17.0f, 18.0f,
2573
2574 // Batch 1, Channel 0
2575 19.0f, 20.0f,
2576
2577 // Batch 1, Channel 1
2578 21.0f, 22.0f,
2579
2580 // Batch 1, Channel 2
2581 23.0f, 24.0f,
2582
2583 // Batch 1, Channel 3
2584 25.0f, 26.0f,
2585
2586 // Batch 1, Channel 4
2587 27.0f, 28.0f,
2588
2589 // Batch 1, Channel 5
2590 29.0f, 30.0f,
2591
2592 // Batch 1, Channel 6
2593 31.0f, 32.0f,
2594
2595 // Batch 1, Channel 7
2596 33.0f, 34.0f,
2597
2598 // Batch 1, Channel 8
2599 35.0f, 36.0f
2600 }));
2601
2602 return result;
2603}
2604
2605LayerTestResult<float, 3> Concatenation3dDim1Test(armnn::IWorkloadFactory& workloadFactory)
2606{
2607 return Concatenation3dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
2608}
2609
2610template <typename T>
2611LayerTestResult<T, 3> Concatenation3dDim2TestImpl(armnn::IWorkloadFactory& workloadFactory,
2612 float qScale, int32_t qOffset)
2613{
2614 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
2615
2616 LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 2, qScale, qOffset);
2617 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2618 // Batch 0, Channel 0
2619 1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
2620
2621 // Batch 0, Channel 1
2622 3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
2623
2624 // Batch 0, Channel 2
2625 5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
2626
2627 // Batch 1, Channel 0
2628 19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
2629
2630 // Batch 1, Channel 1
2631 21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
2632
2633 // Batch 1, Channel 2
2634 23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
2635 }));
2636
2637 return result;
2638}
2639
2640LayerTestResult<float, 3> Concatenation3dDim2Test(armnn::IWorkloadFactory& workloadFactory)
2641{
2642 return Concatenation3dDim2TestImpl<float>(workloadFactory, 0.0f, 0);
2643}
2644
2645template <typename T>
2646LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2647 int32_t qOffset)
2648{
2649 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2650 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2651 // Batch 0, Channel 0
2652 1.0f, 2.0f,
2653
2654 // Batch 0, Channel 1
2655 3.0f, 4.0f,
2656
2657 // Batch 0, Channel 2
2658 5.0f, 6.0f,
2659
2660 // Batch 1, Channel 0
2661 19.0f, 20.0f,
2662
2663 // Batch 1, Channel 1
2664 21.0f, 22.0f,
2665
2666 // Batch 1, Channel 2
2667 23.0f, 24.0f
2668 }));
2669
2670 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, armnn::GetDataType<T>());
2671 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2672 // Batch 0, Channel 0
2673 7.0f, 8.0f,
2674
2675 // Batch 0, Channel 1
2676 9.0f, 10.0f,
2677
2678 // Batch 0, Channel 2
2679 11.0f, 12.0f,
2680 }));
2681
2682 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, armnn::GetDataType<T>());
2683 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2684 // Batch 0, Channel 0
2685 25.0f, 26.0f,
2686
2687 // Batch 0, Channel 1
2688 27.0f, 28.0f,
2689
2690 // Batch 0, Channel 2
2691 29.0f, 30.0f,
2692
2693 // Batch 1, Channel 0
2694 13.0f, 14.0f,
2695
2696 // Batch 1, Channel 1
2697 15.0f, 16.0f,
2698
2699 // Batch 1, Channel 2
2700 17.0f, 18.0f,
2701
2702 // Batch 2, Channel 0
2703 31.0f, 32.0f,
2704
2705 // Batch 2, Channel 1
2706 33.0f, 34.0f,
2707
2708 // Batch 2, Channel 2
2709 35.0f, 36.0f
2710 }));
2711
2712 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
2713 LayerTestResult<T, 3> result(outputTensorInfo);
2714
2715 std::vector<T> output;
2716 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002717 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002718 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2719 { input0.data(), input1.data(), input2.data() },
2720 outputTensorInfo,
2721 output.data(),
2722 0);
2723
2724 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
2725 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2726 // Batch 0, Channel 0
2727 1.0f, 2.0f,
2728
2729 // Batch 0, Channel 1
2730 3.0f, 4.0f,
2731
2732 // Batch 0, Channel 2
2733 5.0f, 6.0f,
2734
2735 // Batch 1, Channel 0
2736 19.0f, 20.0f,
2737
2738 // Batch 1, Channel 1
2739 21.0f, 22.0f,
2740
2741 // Batch 1, Channel 2
2742 23.0f, 24.0f,
2743
2744 // Batch 2, Channel 0
2745 7.0f, 8.0f,
2746
2747 // Batch 2, Channel 1
2748 9.0f, 10.0f,
2749
2750 // Batch 2, Channel 2
2751 11.0f, 12.0f,
2752
2753 // Batch 3, Channel 0
2754 25.0f, 26.0f,
2755
2756 // Batch 3, Channel 1
2757 27.0f, 28.0f,
2758
2759 // Batch 3, Channel 2
2760 29.0f, 30.0f,
2761
2762 // Batch 4, Channel 0
2763 13.0f, 14.0f,
2764
2765 // Batch 4, Channel 1
2766 15.0f, 16.0f,
2767
2768 // Batch 4, Channel 2
2769 17.0f, 18.0f,
2770
2771 // Batch 5, Channel 0
2772 31.0f, 32.0f,
2773
2774 // Batch 5, Channel 1
2775 33.0f, 34.0f,
2776
2777 // Batch 5, Channel 2
2778 35.0f, 36.0f
2779 }));
2780
2781 return result;
2782}
2783
2784LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2785{
2786 return Concatenation3dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2787}
2788
2789template <typename T>
2790LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2791 int32_t qOffset)
2792{
2793 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2794 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2795 // Batch 0, Channel 0
2796 1.0f, 2.0f,
2797
2798 // Batch 0, Channel 1
2799 3.0f, 4.0f,
2800
2801 // Batch 0, Channel 2
2802 5.0f, 6.0f,
2803
2804 // Batch 1, Channel 0
2805 19.0f, 20.0f,
2806
2807 // Batch 1, Channel 1
2808 21.0f, 22.0f,
2809
2810 // Batch 1, Channel 2
2811 23.0f, 24.0f
2812 }));
2813
2814 armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, armnn::GetDataType<T>());
2815 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2816 // Batch 0, Channel 0
2817 7.0f, 8.0f,
2818
2819 // Batch 0, Channel 1
2820 9.0f, 10.0f,
2821
2822 // Batch 0, Channel 2
2823 11.0f, 12.0f,
2824
2825 // Batch 0, Channel 3
2826 25.0f, 26.0f,
2827
2828 // Batch 1, Channel 0
2829 27.0f, 28.0f,
2830
2831 // Batch 1, Channel 1
2832 29.0f, 30.0f,
2833
2834 // Batch 1, Channel 2
2835 13.0f, 14.0f,
2836
2837 // Batch 1, Channel 3
2838 15.0f, 16.0f,
2839 }));
2840
2841 armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, armnn::GetDataType<T>());
2842 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2843 // Batch 0, Channel 0
2844 17.0f, 18.0f,
2845
2846 // Batch 1, Channel 0
2847 31.0f, 32.0f,
2848 }));
2849
2850 armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, armnn::GetDataType<T>());
2851 LayerTestResult<T, 3> result(outputTensorInfo);
2852
2853 std::vector<T> output;
2854 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002855 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002856 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2857 { input0.data(), input1.data(), input2.data() },
2858 outputTensorInfo,
2859 output.data(),
2860 1);
2861
2862 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
2863 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2864 // Batch 0, Channel 0
2865 1.0f, 2.0f,
2866
2867 // Batch 0, Channel 1
2868 3.0f, 4.0f,
2869
2870 // Batch 0, Channel 2
2871 5.0f, 6.0f,
2872
2873 // Batch 0, Channel 3
2874 7.0f, 8.0f,
2875
2876 // Batch 0, Channel 4
2877 9.0f, 10.0f,
2878
2879 // Batch 0, Channel 5
2880 11.0f, 12.0f,
2881
2882 // Batch 0, Channel 6
2883 25.0f, 26.0f,
2884
2885 // Batch 0, Channel 7
2886 17.0f, 18.0f,
2887
2888 // Batch 1, Channel 0
2889 19.0f, 20.0f,
2890
2891 // Batch 1, Channel 1
2892 21.0f, 22.0f,
2893
2894 // Batch 1, Channel 2
2895 23.0f, 24.0f,
2896
2897 // Batch 1, Channel 3
2898 27.0f, 28.0f,
2899
2900 // Batch 1, Channel 4
2901 29.0f, 30.0f,
2902
2903 // Batch 1, Channel 5
2904 13.0f, 14.0f,
2905
2906 // Batch 1, Channel 6
2907 15.0f, 16.0f,
2908
2909 // Batch 1, Channel 7
2910 31.0f, 32.0f,
2911 }));
2912
2913 return result;
2914}
2915
2916LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2917{
2918 return Concatenation3dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2919}
2920
2921template <typename T>
2922LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2923 int32_t qOffset)
2924{
2925 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2926 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2927 // Batch 0, Channel 0
2928 1.0f, 2.0f,
2929
2930 // Batch 0, Channel 1
2931 3.0f, 4.0f,
2932
2933 // Batch 0, Channel 2
2934 5.0f, 6.0f,
2935
2936 // Batch 1, Channel 0
2937 19.0f, 20.0f,
2938
2939 // Batch 1, Channel 1
2940 21.0f, 22.0f,
2941
2942 // Batch 1, Channel 2
2943 23.0f, 24.0f
2944 }));
2945
2946 armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, armnn::GetDataType<T>());
2947 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2948 // Batch 0, Channel 0
2949 7.0f,
2950
2951 // Batch 0, Channel 1
2952 9.0f,
2953
2954 // Batch 0, Channel 2
2955 11.0f,
2956
2957 // Batch 1, Channel 0
2958 25.0f,
2959
2960 // Batch 1, Channel 1
2961 27.0f,
2962
2963 // Batch 1, Channel 2
2964 29.0f
2965 }));
2966
2967 armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, armnn::GetDataType<T>());
2968 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2969 // Batch 0, Channel 0
2970 13.0f, 14.0f, 50.0f,
2971
2972 // Batch 0, Channel 1
2973 15.0f, 16.0f, 51.0f,
2974
2975 // Batch 0, Channel 2
2976 17.0f, 18.0f, 52.0f,
2977
2978 // Batch 1, Channel 0
2979 31.0f, 32.0f, 53.0f,
2980
2981 // Batch 1, Channel 1
2982 33.0f, 34.0f, 54.0f,
2983
2984 // Batch 1, Channel 2
2985 35.0f, 36.0f, 55.0f,
2986 }));
2987
2988 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
2989 LayerTestResult<T, 3> result(outputTensorInfo);
2990
2991 std::vector<T> output;
2992 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002993 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002994 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2995 { input0.data(), input1.data(), input2.data() },
2996 outputTensorInfo,
2997 output.data(),
2998 2);
2999
3000 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
3001 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3002 // Batch 0, Channel 0
3003 1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
3004
3005 // Batch 0, Channel 1
3006 3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
3007
3008 // Batch 0, Channel 2
3009 5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
3010
3011 // Batch 1, Channel 0
3012 19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
3013
3014 // Batch 1, Channel 1
3015 21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
3016
3017 // Batch 1, Channel 2
3018 23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
3019 }));
3020
3021 return result;
3022}
3023
3024LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
3025{
3026 return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
3027}
3028
James Conroy074f3712018-10-03 09:32:03 +01003029LayerTestResult<float, 4> ResizeBilinearNopTestImpl(armnn::IWorkloadFactory& workloadFactory,
3030 const armnn::TensorShape& inputOutputTensorShape,
3031 armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00003032{
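    // A no-op resize: the output shape matches the input shape, so the layer
    // is expected to pass the data through unchanged.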
James Conroy074f3712018-10-03 09:32:03 +01003033 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
3034 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00003035
3036 auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
3037 1.0f, 2.0f, 3.0f, 4.0f,
3038 2.0f, 3.0f, 4.0f, 5.0f,
3039 3.0f, 4.0f, 5.0f, 6.0f,
3040 4.0f, 5.0f, 6.0f, 7.0f
3041 }));
3042
3043 LayerTestResult<float, 4> result(outputTensorInfo);
3044 result.outputExpected = input;
3045
3046 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3047 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3048
3049 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003050 descriptor.m_Parameters.m_DataLayout = dataLayout;
3051 armnn::WorkloadInfo info;
3052 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3053 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3054
3055 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3056
3057 inputHandle->Allocate();
3058 outputHandle->Allocate();
3059 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3060
3061 workloadFactory.Finalize();
3062 workload->Execute();
3063
3064 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3065 return result;
3066}
3067
3068LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory)
3069{
3070 // BatchSize = 1, Channels = 1, Height = 4, Width = 4
3071 const armnn::TensorShape inputOutputShape{ 1, 1, 4, 4 };
3072
3073 return ResizeBilinearNopTestImpl(workloadFactory, inputOutputShape, armnn::DataLayout::NCHW);
3074}
3075
3076LayerTestResult<float, 4> ResizeBilinearNopNhwcTest(armnn::IWorkloadFactory& workloadFactory)
3077{
3078 // BatchSize = 1, Height = 4, Width = 4, Channels = 1
3079 const armnn::TensorShape inputOutputShape{ 1, 4, 4, 1 };
3080
3081 return ResizeBilinearNopTestImpl(workloadFactory, inputOutputShape, armnn::DataLayout::NHWC);
3082}
3083
3084LayerTestResult<float, 4> SimpleResizeBilinearTestImpl(armnn::IWorkloadFactory& workloadFactory,
3085 const armnn::TensorShape& inputTensorShape,
3086 const armnn::TensorShape& outputTensorShape,
3087 armnn::DataLayout dataLayout)
3088{
3089 const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
3090 const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
3091
3092 auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
3093 1.0f, 255.0f,
3094 200.0f, 250.0f
3095 }));
3096
3097 // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
3098 // then figures out the interpolants and weights. Note this is different to projecting the centre of the
3099 // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
3100 // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
3101 // the centre).
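    // Here the scale factor is inputSize / outputSize = 2 / 1 = 2, so the single
    // output texel projects onto input position (0,0), whose value is 1.0f.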
3102 LayerTestResult<float, 4> result(outputTensorInfo);
3103 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
3104 1.0f
3105 }));
3106
3107 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3108 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3109
3110 armnn::ResizeBilinearQueueDescriptor descriptor;
3111 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003112 armnn::WorkloadInfo info;
3113 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3114 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3115
3116 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3117
3118 inputHandle->Allocate();
3119 outputHandle->Allocate();
3120 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3121
surmeh013537c2c2018-05-18 16:31:43 +01003122 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00003123 workload->Execute();
3124
3125 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3126 return result;
3127}
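
// The following helper is an illustrative sketch, not part of the test suite: it spells out the
// top-left-corner projection arithmetic described in the comment inside SimpleResizeBilinearTestImpl,
// assuming scale = inputDim / outputDim and no half-texel centre offset. The function name and the
// single-channel flat-float layout are our own; the real backends implement their own version of this.
namespace
{

float SampleBilinearTopLeft(const std::vector<float>& input,
                            unsigned int inputWidth, unsigned int inputHeight,
                            unsigned int outputWidth, unsigned int outputHeight,
                            unsigned int outX, unsigned int outY)
{
    const float scaleX = static_cast<float>(inputWidth) / static_cast<float>(outputWidth);
    const float scaleY = static_cast<float>(inputHeight) / static_cast<float>(outputHeight);

    // Project the top-left corner of the output texel back into the input (no +0.5 centre offset).
    const float inX = static_cast<float>(outX) * scaleX;
    const float inY = static_cast<float>(outY) * scaleY;

    const unsigned int x0 = static_cast<unsigned int>(inX);
    const unsigned int y0 = static_cast<unsigned int>(inY);
    const unsigned int x1 = std::min(x0 + 1, inputWidth - 1);  // Clamp at the right edge.
    const unsigned int y1 = std::min(y0 + 1, inputHeight - 1); // Clamp at the bottom edge.

    const float fx = inX - static_cast<float>(x0); // Horizontal interpolation weight.
    const float fy = inY - static_cast<float>(y0); // Vertical interpolation weight.

    const float topLeft     = input[y0 * inputWidth + x0];
    const float topRight    = input[y0 * inputWidth + x1];
    const float bottomLeft  = input[y1 * inputWidth + x0];
    const float bottomRight = input[y1 * inputWidth + x1];

    const float top    = topLeft    + fx * (topRight    - topLeft);
    const float bottom = bottomLeft + fx * (bottomRight - bottomLeft);
    return top + fy * (bottom - top);
}

} // anonymous namespace

// For the 2x2 -> 1x1 test above, SampleBilinearTopLeft(input, 2, 2, 1, 1, 0, 0) projects output
// texel (0,0) onto input (0,0) with zero fractional weights, reproducing the expected 1.0f.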

LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 2
    const armnn::TensorShape inputShape{ 1, 1, 2, 2 };

    // outputShape: BatchSize = 1, Channels = 1, Height = 1, Width = 1
    const armnn::TensorShape outputShape{ 1, 1, 1, 1 };

    return SimpleResizeBilinearTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> SimpleResizeBilinearNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Height = 2, Width = 2, Channels = 1
    const armnn::TensorShape inputShape{ 1, 2, 2, 1 };

    // outputShape: BatchSize = 1, Height = 1, Width = 1, Channels = 1
    const armnn::TensorShape outputShape{ 1, 1, 1, 1 };

    return SimpleResizeBilinearTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> ResizeBilinearSqMinTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                      const armnn::TensorShape& inputTensorShape,
                                                      const armnn::TensorShape& outputTensorShape,
                                                      armnn::DataLayout dataLayout)
{
    const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        1.0f, 2.0f, 3.0f, 4.0f,
        2.0f, 3.0f, 4.0f, 5.0f,
        3.0f, 4.0f, 5.0f, 6.0f,
        4.0f, 5.0f, 6.0f, 7.0f
    }));

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
        1.0f, 3.0f,
        3.0f, 5.0f
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Channels = 1, Height = 4, Width = 4
    const armnn::TensorShape inputShape{ 1, 1, 4, 4 };

    // outputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 2
    const armnn::TensorShape outputShape{ 1, 1, 2, 2 };

    return ResizeBilinearSqMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> ResizeBilinearSqMinNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Height = 4, Width = 4, Channels = 1
    const armnn::TensorShape inputShape{ 1, 4, 4, 1 };

    // outputShape: BatchSize = 1, Height = 2, Width = 2, Channels = 1
    const armnn::TensorShape outputShape{ 1, 2, 2, 1 };

    return ResizeBilinearSqMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> ResizeBilinearMinTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                    const armnn::TensorShape& inputTensorShape,
                                                    const armnn::TensorShape& outputTensorShape,
                                                    armnn::DataLayout dataLayout)
{
    const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
          1.0f,   2.0f,   3.0f,   5.0f,   8.0f,
         13.0f,  21.0f,  34.0f,  55.0f,  89.0f,
        144.0f, 233.0f, 377.0f, 610.0f, 987.0f
    }));

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
         1.0f,   2.6666f,   6.0f,
        78.5f, 179.3333f, 401.0f
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 5
    const armnn::TensorShape inputShape{ 1, 1, 3, 5 };

    // outputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 3
    const armnn::TensorShape outputShape{ 1, 1, 2, 3 };

    return ResizeBilinearMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> ResizeBilinearMinNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Height = 3, Width = 5, Channels = 1
    const armnn::TensorShape inputShape{ 1, 3, 5, 1 };

    // outputShape: BatchSize = 1, Height = 2, Width = 3, Channels = 1
    const armnn::TensorShape outputShape{ 1, 2, 3, 1 };

    return ResizeBilinearMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
}
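
// Worked example (ours) applying the corner-projection arithmetic sketched after
// SimpleResizeBilinearTestImpl to the 3x5 -> 2x3 minification above, with scaleX = 5/3 and
// scaleY = 3/2:
//   output (0,1): x = 1 * 5/3 ~ 1.6667 -> 2.0 + 0.6667 * (3.0 - 2.0) ~ 2.6667 (stored as 2.6666f)
//   output (1,0): y = 1 * 3/2 = 1.5    -> 13.0 + 0.5 * (144.0 - 13.0) = 78.5
//   output (1,2): x ~ 3.3333, y = 1.5  -> blending rows 1 and 2 of the input gives exactly 401.0
// matching the expectedOutputValues in ResizeBilinearMinTestImpl.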

LayerTestResult<float, 4> ResizeBilinearMagTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                    const armnn::TensorShape& inputTensorShape,
                                                    const armnn::TensorShape& outputTensorShape,
                                                    armnn::DataLayout dataLayout)
{
    const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
          1.0f,   2.0f,
         13.0f,  21.0f,
        144.0f, 233.0f
    }));

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
          1.0f,   1.4f,   1.8f,   2.0f,   2.0f,
         13.0f,  16.2f,  19.4f,  21.0f,  21.0f,
        144.0f, 179.6f, 215.2f, 233.0f, 233.0f
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 2
    const armnn::TensorShape inputShape{ 1, 1, 3, 2 };

    // outputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 5
    const armnn::TensorShape outputShape{ 1, 1, 3, 5 };

    return ResizeBilinearMagTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> ResizeBilinearMagNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // inputShape: BatchSize = 1, Height = 3, Width = 2, Channels = 1
    const armnn::TensorShape inputShape{ 1, 3, 2, 1 };

    // outputShape: BatchSize = 1, Height = 3, Width = 5, Channels = 1
    const armnn::TensorShape outputShape{ 1, 3, 5, 1 };

    return ResizeBilinearMagTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
}
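
// Worked example (ours) for the magnification case above, again projecting output texel corners
// with scaleX = inputWidth / outputWidth = 2/5 = 0.4: the five output columns project to input
// x = 0.0, 0.4, 0.8, 1.2 and 1.6, so row 0 becomes 1.0, then 1.0 + 0.4 * (2.0 - 1.0) = 1.4,
// then 1.8, and finally 2.0, 2.0 once the sample point passes the last input column and clamps
// to it - matching the first row of expectedOutputValues in ResizeBilinearMagTestImpl.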

LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int width = 2;
    constexpr unsigned int height = 3;

    const armnn::TensorInfo tensorInfo({ height, width }, armnn::DataType::Float32);
    auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
        -10.0f, -5.0f,
          0.0f,  5.0f,
         10.0f, 10.0f
    }));

    LayerTestResult<float, 2> ret(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);

    armnn::FakeQuantizationQueueDescriptor data;
    armnn::WorkloadInfo info;

    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());

    float min = -10.f;
    float max = 10.f;

    data.m_Parameters.m_Min = min;
    data.m_Parameters.m_Max = max;

    armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
    armnn::FakeQuantizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
          0.0f,  63.0f,
        128.0f, 191.0f,
        255.0f, 255.0f
    }));
    return ret;
}
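
// A rough reading (ours) of the arithmetic the expected values above encode: fake quantization
// maps the real range [m_Min, m_Max] onto the uint8 levels [0, 255] and back, so with min = -10
// and max = 10 the step is (10 - (-10)) / 255 ~ 0.0784 real units per level. -10.0f therefore
// lands on level 0 and 10.0f on level 255, with intermediate inputs snapping to nearby levels
// (0.0f -> 128.0f, 5.0f -> 191.0f). The precise rounding and zero-point conventions are left to
// the backend under test, which is why the expected values are hard-coded rather than recomputed
// here.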

namespace
{

LayerTestResult<float, 4> L2NormalizationTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                  const armnn::TensorShape& inputOutputTensorShape,
                                                  const std::vector<float>& inputValues,
                                                  const std::vector<float>& expectedOutputValues,
                                                  armnn::DataLayout dataLayout)
{
    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);

    auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputValues));

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

float CalcInvL2Norm(std::initializer_list<float> elements)
{
    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
        [](float acc, float element) { return acc + element * element; });
    return 1.0f / sqrtf(reduction);
}

} // anonymous namespace
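
// A minimal sketch (ours, for illustration) of what the L2Normalization workload computes for
// NCHW data with batch size 1, mirroring CalcInvL2Norm above: every element is scaled by the
// inverse L2 norm taken across the channel dimension at the same (height, width) position.
// The function name and the flat std::vector layout are assumptions, not the backend code.
namespace
{

void L2NormalizeAcrossChannelsSketch(const std::vector<float>& input, std::vector<float>& output,
                                     unsigned int channels, unsigned int height, unsigned int width)
{
    for (unsigned int h = 0; h < height; ++h)
    {
        for (unsigned int w = 0; w < width; ++w)
        {
            // Accumulate the squared magnitude across channels at this spatial position.
            float sumSquares = 0.0f;
            for (unsigned int c = 0; c < channels; ++c)
            {
                const float v = input[(c * height + h) * width + w];
                sumSquares += v * v;
            }
            const float invNorm = 1.0f / sqrtf(sumSquares);
            for (unsigned int c = 0; c < channels; ++c)
            {
                const unsigned int index = (c * height + h) * width + w;
                output[index] = input[index] * invNorm;
            }
        }
    }
}

} // anonymous namespace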

LayerTestResult<float, 4> L2Normalization1dTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Width: 1
    // Height: 1
    // Channels: 10
    // BatchSize: 1

    const armnn::TensorShape inputOutputShape{ 1, 10, 1, 1 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        1.0f,

        // Batch 0, Channel 1, Height (1) x Width (1)
        2.0f,

        // Batch 0, Channel 2, Height (1) x Width (1)
        3.0f,

        // Batch 0, Channel 3, Height (1) x Width (1)
        4.0f,

        // Batch 0, Channel 4, Height (1) x Width (1)
        5.0f,

        // Batch 0, Channel 5, Height (1) x Width (1)
        6.0f,

        // Batch 0, Channel 6, Height (1) x Width (1)
        7.0f,

        // Batch 0, Channel 7, Height (1) x Width (1)
        8.0f,

        // Batch 0, Channel 8, Height (1) x Width (1)
        9.0f,

        // Batch 0, Channel 9, Height (1) x Width (1)
        10.0f
    };
    const float approxInvL2Norm = 0.050964719f;
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        1.0f * approxInvL2Norm,
        2.0f * approxInvL2Norm,
        3.0f * approxInvL2Norm,
        4.0f * approxInvL2Norm,
        5.0f * approxInvL2Norm,
        6.0f * approxInvL2Norm,
        7.0f * approxInvL2Norm,
        8.0f * approxInvL2Norm,
        9.0f * approxInvL2Norm,
        10.0f * approxInvL2Norm
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
}
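
// Sanity check (ours) on the constant above: the squared L2 norm of the values 1..10 is
// 1 + 4 + 9 + ... + 100 = 385, and 1 / sqrt(385) ~ 1 / 19.62142 ~ 0.050964719, which is the
// approxInvL2Norm used here and reused by the NHWC variant below.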

LayerTestResult<float, 4> L2Normalization1dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Width: 1
    // Height: 1
    // Channels: 10
    // BatchSize: 1

    const armnn::TensorShape inputOutputShape{ 1, 1, 1, 10 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (1) x Channel (10)
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f
    };
    const float approxInvL2Norm = 0.050964719f;
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (1) x Channel (10)
        1.0f * approxInvL2Norm,
        2.0f * approxInvL2Norm,
        3.0f * approxInvL2Norm,
        4.0f * approxInvL2Norm,
        5.0f * approxInvL2Norm,
        6.0f * approxInvL2Norm,
        7.0f * approxInvL2Norm,
        8.0f * approxInvL2Norm,
        9.0f * approxInvL2Norm,
        10.0f * approxInvL2Norm
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> L2Normalization2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Width: 5
    // Height: 1
    // Channels: 2
    // BatchSize: 1

    const armnn::TensorShape inputOutputShape{ 1, 2, 1, 5 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (5)
        1.0f, 3.0f, 5.0f, 7.0f, 9.0f,

        // Batch 0, Channel 1, Height (1) x Width (5)
        2.0f, 4.0f, 6.0f, 8.0f, 10.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (5)
        1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),

        // Batch 0, Channel 1, Height (1) x Width (5)
        2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> L2Normalization2dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Width: 5
    // Height: 1
    // Channels: 2
    // BatchSize: 1

    const armnn::TensorShape inputOutputShape{ 1, 1, 5, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (5) x Channel (2)
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (5) x Channel (2)
        1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> L2Normalization3dTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Width: 3
    // Height: 4
    // Channels: 2
    // BatchSize: 1

    const armnn::TensorShape inputOutputShape{ 1, 2, 4, 3 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f,  21.0f, 150.0f,
        149.0f,  32.0f, 179.0f,
         15.0f, 227.0f, 141.0f,
        147.0f, 199.0f, 220.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f, 140.0f,  73.0f,
        211.0f, 212.0f,  89.0f,
         24.0f, 138.0f, 188.0f,
        162.0f,  12.0f, 161.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
         21.0f * CalcInvL2Norm({  21.0f, 140.0f }),
        150.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
         32.0f * CalcInvL2Norm({  32.0f, 212.0f }),
        179.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
         15.0f * CalcInvL2Norm({  15.0f,  24.0f }),
        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        199.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        140.0f * CalcInvL2Norm({  21.0f, 140.0f }),
         73.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        212.0f * CalcInvL2Norm({  32.0f, 212.0f }),
         89.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
         24.0f * CalcInvL2Norm({  15.0f,  24.0f }),
        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
         12.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
        161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> L2Normalization3dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Width: 3
    // Height: 4
    // Channels: 2
    // BatchSize: 1

    const armnn::TensorShape inputOutputShape{ 1, 4, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (3) x Channel (2)
        119.0f, 110.0f,
         21.0f, 140.0f,
        150.0f,  73.0f,

        // Batch 0, Height 1, Width (3) x Channel (2)
        149.0f, 211.0f,
         32.0f, 212.0f,
        179.0f,  89.0f,

        // Batch 0, Height 2, Width (3) x Channel (2)
         15.0f,  24.0f,
        227.0f, 138.0f,
        141.0f, 188.0f,

        // Batch 0, Height 3, Width (3) x Channel (2)
        147.0f, 162.0f,
        199.0f,  12.0f,
        220.0f, 161.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (3) x Channel (2)
        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
         21.0f * CalcInvL2Norm({  21.0f, 140.0f }),
        140.0f * CalcInvL2Norm({  21.0f, 140.0f }),
        150.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
         73.0f * CalcInvL2Norm({ 150.0f,  73.0f }),

        // Batch 0, Height 1, Width (3) x Channel (2)
        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
         32.0f * CalcInvL2Norm({  32.0f, 212.0f }),
        212.0f * CalcInvL2Norm({  32.0f, 212.0f }),
        179.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
         89.0f * CalcInvL2Norm({ 179.0f,  89.0f }),

        // Batch 0, Height 2, Width (3) x Channel (2)
         15.0f * CalcInvL2Norm({  15.0f,  24.0f }),
         24.0f * CalcInvL2Norm({  15.0f,  24.0f }),
        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),

        // Batch 0, Height 3, Width (3) x Channel (2)
        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        199.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
         12.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
        161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> L2Normalization4dTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Width: 3
    // Height: 4
    // Channels: 3
    // BatchSize: 2

    const armnn::TensorShape inputOutputShape{ 2, 3, 4, 3 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f,  46.0f, 178.0f,
        100.0f, 123.0f,  19.0f,
        172.0f,  74.0f, 250.0f,
          6.0f, 195.0f,  80.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f,  95.0f, 202.0f,
         77.0f, 114.0f,  71.0f,
        122.0f, 246.0f, 166.0f,
         82.0f,  28.0f,  37.0f,

        // Batch 0, Channel 2, Height (4) x Width (3)
         56.0f, 170.0f, 162.0f,
        194.0f,  89.0f, 254.0f,
         12.0f, 209.0f, 200.0f,
          1.0f,  64.0f,  54.0f,

        // Batch 1, Channel 0, Height (4) x Width (3)
         67.0f,  90.0f,  49.0f,
          7.0f, 163.0f,  18.0f,
         25.0f, 117.0f, 103.0f,
        247.0f,  59.0f, 189.0f,

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f, 104.0f, 199.0f,
         17.0f, 124.0f, 153.0f,
        222.0f, 217.0f,  75.0f,
         32.0f, 126.0f,  21.0f,

        // Batch 1, Channel 2, Height (4) x Width (3)
         97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f,  16.0f, 132.0f,
         92.0f, 125.0f,  88.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
         46.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        100.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
        123.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
         19.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
        172.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
         74.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
          6.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
        195.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         80.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
         95.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
         77.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
        114.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
         71.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
        122.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
        246.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
         82.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
         28.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         37.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 0, Channel 2, Height (4) x Width (3)
         56.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
        170.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        194.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
         89.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
        254.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
         12.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
        209.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
          1.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
         64.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         54.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 1, Channel 0, Height (4) x Width (3)
         67.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
         90.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
         49.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
          7.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
         18.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
         25.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
        117.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
        103.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
        247.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
         59.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
        189.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
        104.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
        199.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
         17.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        153.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
        222.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
        217.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
         75.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
         32.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
        126.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
         21.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),

        // Batch 1, Channel 2, Height (4) x Width (3)
         97.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
        145.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
        215.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
        115.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        238.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
        226.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
         16.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
        132.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
         92.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
        125.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
         88.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> L2Normalization4dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Width: 3
    // Height: 4
    // Channels: 3
    // BatchSize: 2

    const armnn::TensorShape inputOutputShape{ 2, 4, 3, 3 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (3) x Channel (3)
        235.0f, 113.0f,  56.0f,
         46.0f,  95.0f, 170.0f,
        178.0f, 202.0f, 162.0f,

        // Batch 0, Height 1, Width (3) x Channel (3)
        100.0f,  77.0f, 194.0f,
        123.0f, 114.0f,  89.0f,
         19.0f,  71.0f, 254.0f,

        // Batch 0, Height 2, Width (3) x Channel (3)
        172.0f, 122.0f,  12.0f,
         74.0f, 246.0f, 209.0f,
        250.0f, 166.0f, 200.0f,

        // Batch 0, Height 3, Width (3) x Channel (3)
          6.0f,  82.0f,   1.0f,
        195.0f,  28.0f,  64.0f,
         80.0f,  37.0f,  54.0f,

        // Batch 1, Height 0, Width (3) x Channel (3)
         67.0f, 239.0f,  97.0f,
         90.0f, 104.0f, 145.0f,
         49.0f, 199.0f, 215.0f,

        // Batch 1, Height 1, Width (3) x Channel (3)
          7.0f,  17.0f, 115.0f,
        163.0f, 124.0f, 116.0f,
         18.0f, 153.0f, 238.0f,

        // Batch 1, Height 2, Width (3) x Channel (3)
         25.0f, 222.0f, 226.0f,
        117.0f, 217.0f,  16.0f,
        103.0f,  75.0f, 132.0f,

        // Batch 1, Height 3, Width (3) x Channel (3)
        247.0f,  32.0f,  92.0f,
         59.0f, 126.0f, 125.0f,
        189.0f,  21.0f,  88.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (3) x Channel (3)
        235.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
        113.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
         56.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
         46.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
         95.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        170.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),

        // Batch 0, Height 1, Width (3) x Channel (3)
        100.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
         77.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
        194.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
        123.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
        114.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
         89.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
         19.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
         71.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
        254.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),

        // Batch 0, Height 2, Width (3) x Channel (3)
        172.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
        122.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
         12.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
         74.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        246.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        209.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),

        // Batch 0, Height 3, Width (3) x Channel (3)
          6.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
         82.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
          1.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
        195.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         28.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         64.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         80.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),
         37.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),
         54.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 1, Height 0, Width (3) x Channel (3)
         67.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
        239.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
         97.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
         90.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
        104.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
        145.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
         49.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
        199.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
        215.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),

        // Batch 1, Height 1, Width (3) x Channel (3)
          7.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
         17.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        115.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
         18.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
        153.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
        238.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),

        // Batch 1, Height 2, Width (3) x Channel (3)
         25.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
        222.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
        226.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
        117.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
        217.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
         16.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
        103.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
         75.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
        132.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),

        // Batch 1, Height 3, Width (3) x Channel (3)
        247.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
         32.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
         92.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
         59.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
        126.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
        125.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
        189.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),
         21.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),
         88.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
}

template <typename T>
LayerTestResult<T, 4> ConstantTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                       float qScale,
                                       int32_t qOffset)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::GetDataType<T>());

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        235.0f,  46.0f, 178.0f,
        100.0f, 123.0f,  19.0f,
        172.0f,  74.0f, 250.0f,
          6.0f, 195.0f,  80.0f,

        // Batch 0, Channel 1
        113.0f,  95.0f, 202.0f,
         77.0f, 114.0f,  71.0f,
        122.0f, 246.0f, 166.0f,
         82.0f,  28.0f,  37.0f,

        // Batch 0, Channel 2
         56.0f, 170.0f, 162.0f,
        194.0f,  89.0f, 254.0f,
         12.0f, 209.0f, 200.0f,
          1.0f,  64.0f,  54.0f,

        // Batch 1, Channel 0
         67.0f,  90.0f,  49.0f,
          7.0f, 163.0f,  18.0f,
         25.0f, 117.0f, 103.0f,
        247.0f,  59.0f, 189.0f,

        // Batch 1, Channel 1
        239.0f, 104.0f, 199.0f,
         17.0f, 124.0f, 153.0f,
        222.0f, 217.0f,  75.0f,
         32.0f, 126.0f,  21.0f,

        // Batch 1, Channel 2
         97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f,  16.0f, 132.0f,
         92.0f, 125.0f,  88.0f,
    })));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;

    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);

    outputHandle->Allocate();

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ConstantTest(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> ConstantTestUint8(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
}
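
// A minimal sketch (ours) of the affine quantization convention that QuantizedVector is assumed
// to follow when ConstantTestImpl runs with a quantized type: real values map to integers via
// q = round(x / scale) + offset, clamped to the representable range, and back via
// x = scale * (q - offset). With ConstantTestUint8's scale = 1.0f and offset = 0 the float data
// above passes through unchanged (modulo rounding to whole numbers). The helper name is
// hypothetical and this assumes <cmath> for std::round.
namespace
{

uint8_t QuantizeToUint8Sketch(float value, float scale, int32_t offset)
{
    // Scale and shift into the quantized domain, then saturate to the uint8 range.
    const int32_t q = static_cast<int32_t>(std::round(value / scale)) + offset;
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));
}

} // anonymous namespace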

LayerTestResult<uint8_t, 3> MergerUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the merger operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
         1,  2,  3,
         4,  5,  6,
         7,  8,  9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
         1,  2,  3,
         4,  5,  6,
         7,  8,  9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; // Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; // Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
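
// How the two merged views compose (our summary of the window origins above): the output tensor
// is 3 x 6 x 3 (channels x height x width). wOrigin1 = { 0, 0, 0 } places input1's two channels
// at output channels [0, 2), and wOrigin2 = { 2, 0, 0 } places input2's single channel at output
// channel 2 - hence values 1..36 fill the first two channels and 37..54 the third.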

LayerTestResult<uint8_t, 4> AdditionUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 1;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    const float scale = 7.0f;
    const int32_t offset = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    const unsigned int shape[] = { batchSize, channels, height, width };
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);

    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    // See dequantized values to the right.
    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
    {
         63,  35,  77,  70,  56, 112, //  420,  224,  518,  469,  371,  763
        203,  28, 252, 168, 245,  91  // 1400,  175, 1743, 1155, 1694,  616
    }));

    // See dequantized values to the right.
    auto input2 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
    {
         21,   7, 175, 231, 175, 210, // 126,   28, 1204, 1596, 1204, 1449
        126, 161,  63,  21, 105, 126  // 861, 1106,  420,  126,  714,  861
    }));

    // See dequantized values to the right.
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>(
    {
         81,  39, 249, 255, 228, 255, //  546,  252, 1722, 2065(clamped), 1575, 2212(clamped)
        255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
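
// Worked example (ours) of the quantized arithmetic behind the expected values above, using
// real = scale * (q - offset) with scale = 7 and offset = 3 on both inputs and the output:
//   input1[0] = 63 -> 7 * (63 - 3) = 420;  input2[0] = 21 -> 7 * (21 - 3) = 126
//   sum = 546 -> 546 / 7 + 3 = 81, the first expected output value.
// Any sum above 7 * (255 - 3) = 1764 cannot be represented and saturates to 255, e.g.
// 469 + 1596 = 2065, matching the "(clamped)" annotations.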

namespace
{
LayerTestResult<uint8_t, 4> MultiplicationUint8TestHelper(armnn::IWorkloadFactory& workloadFactory,
                                                          const unsigned int shape0[4],
                                                          const std::vector<uint8_t>& values0,
                                                          float scale0,
                                                          int32_t offset0,
                                                          const unsigned int shape1[4],
                                                          const std::vector<uint8_t>& values1,
                                                          float scale1,
                                                          int32_t offset1,
                                                          const unsigned int outShape[4],
                                                          const std::vector<uint8_t>& outValues,
                                                          float outScale,
                                                          int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1(4, shape1, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo outputTensorInfo(4, outShape, armnn::DataType::QuantisedAsymm8);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<uint8_t, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, values1);

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<uint8_t, 4> MultiplicationUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 1;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;
    const unsigned int shape[] = { batchSize, channels, height, width };

    // See dequantized values to the right.
    std::vector<uint8_t> input0({
        62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
        188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
    });

    // See dequantized values to the right.
    std::vector<uint8_t> input1({
        126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
        48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
    });

    // See dequantized values to the right.
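    // (Worked example for the first element: 62 dequantizes to (62 - 1) * 4 = 244 and
    // 126 to (126 + 2) * 3 = 384; their product is 93696, which requantizes with the
    // scale/offset passed below as 93696 / 1366.255 - 5 = 63.58 and is stored as 64.)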
    std::vector<uint8_t> output(
    {
        64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
        77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
    });

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape,
                                         input0,
                                         4.0f,
                                         1,
                                         shape,
                                         input1,
                                         3.0f,
                                         -2,
                                         shape,
                                         output,
                                         1366.255f, // Scale/offset chosen to have output values out of range.
                                         -5);
}

LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
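    // input1 holds a single value that is broadcast against every element of input0.
    // With scale 1.0 and offset 0 on all tensors, the expected outputs are the plain products.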
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<uint8_t> input0({
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12
    });

    std::vector<uint8_t> input1({2});

    std::vector<uint8_t> output({
        2, 4, 6, 8, 10, 12,
        14, 16, 18, 20, 22, 24
    });

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape0,
                                         input0,
                                         1.0f,
                                         0,
                                         shape1,
                                         input1,
                                         1.0f,
                                         0,
                                         shape0,
                                         output,
                                         1.0f,
                                         0);
}

LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
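    // input1 is a { 1, 1, 1, 3 } vector that is broadcast along the last dimension: each
    // row of input0 is multiplied elementwise by { 1, 2, 3 }. Quantization is identity here too.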
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 3 };

    std::vector<uint8_t> input0({
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12
    });

    std::vector<uint8_t> input1({1, 2, 3});

    std::vector<uint8_t> output({
        1, 4, 9, 4, 10, 18,
        7, 16, 27, 10, 22, 36
    });

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape0,
                                         input0,
                                         1.0f,
                                         0,
                                         shape1,
                                         input1,
                                         1.0f,
                                         0,
                                         shape0,
                                         output,
                                         1.0f,
                                         0);
}

namespace
{
template <typename T>
LayerTestResult<T, 4> SubtractionTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                            const unsigned int shape0[4],
                                            const std::vector<T>& values0,
                                            float scale0,
                                            int32_t offset0,
                                            const unsigned int shape1[4],
                                            const std::vector<T>& values1,
                                            float scale1,
                                            int32_t offset1,
                                            const unsigned int outShape[4],
                                            const std::vector<T>& outValues,
                                            float outScale,
                                            int32_t outOffset)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<uint8_t, 4> SubtractionUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
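    // input0 uses scale 0.5 and offset 2, so { 10, 12, 14, 16 } dequantizes to { 4, 5, 6, 7 }.
    // Subtracting input1 (identity quantization) gives { 3, 3, 5, 5 }, which the identity
    // output quantization stores unchanged.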
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 2 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 1, 2, 1, 2 });
    std::vector<uint8_t> output({ 3, 3, 5, 5 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 0.5f, 2,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
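    // input0 dequantizes to { 4, 5, 6, 7 } (scale 0.5, offset 2) and the broadcast input1
    // value is 2, giving real results { 2, 3, 4, 5 }. With an output offset of 3 these are
    // stored as { 5, 6, 7, 8 }.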
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 2 });
    std::vector<uint8_t> output({ 5, 6, 7, 8 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 0.5f, 2,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 3);
}

LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 1 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 2, 1 });
    std::vector<uint8_t> output({ 8, 11, 12, 15 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 2 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 1, -1, 0, 2 });
    std::vector<float> output({ 0, 3, 3, 2 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 10 });
    std::vector<float> output({ -9, -8, -7, -6 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 2 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 10, -5 });
    std::vector<float> output({ -9, 7, -7, 9 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.1567f);
    inputTensorInfo.SetQuantizationOffset(1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.1567f);
    outputTensorInfo.SetQuantizationOffset(1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 255,
        200, 250
    }));

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
    // the centre).
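    // (Concretely: with a 2x2 input and a 1x1 output the scale factor is 2, so the single
    // output texel (0,0) projects onto input coordinate (0, 0) and the expected quantized
    // output is the input's top-left value, 1, untouched by interpolation.)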
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(3.141592f);
    inputTensorInfo.SetQuantizationOffset(3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(3.141592f);
    outputTensorInfo.SetQuantizationOffset(3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3,
        3, 5
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 2;
    constexpr unsigned int outputHeight = 1;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, // 3.0, 4.5, 6.0
        5, 8, 13 // 9.0, 13.5, 21.0
    }));

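    // The horizontal scale factor is inputWidth / outputWidth = 1.5, so output x = 1 projects
    // to input x = 1.5, halfway between the dequantized values 4.5 and 6.0 - hence the 5.25
    // noted below. Output x = 0 projects straight onto 3.0.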
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3 // 3.0, 5.25
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 3;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 5;
    constexpr unsigned int outputHeight = 3;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.010765f);
    inputTensorInfo.SetQuantizationOffset(7);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.010132f);
    outputTensorInfo.SetQuantizationOffset(-18);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        24, 228, // 0.183005, 2.379065,
        105, 128, // 1.05497, 1.302565
        230, 71 // 2.400595, 0.68896
    }));

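    // The horizontal scale factor is inputWidth / outputWidth = 0.4, so output x = 1 projects
    // to input x = 0.4: 0.183005 + 0.4 * (2.379065 - 0.183005) = 1.061429, matching the second
    // expected value in the first row below. Columns 3 and 4 project past the last input
    // column and clamp to it, which is why each row's last two values repeat.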
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        0, 87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
        86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
        219, 151, 84, 50, 50 // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> BatchNormTest(armnn::IWorkloadFactory& workloadFactory)
{
    return BatchNormTestImpl<float>(workloadFactory, 0.f, 0);
}

LayerTestResult<uint8_t, 4> BatchNormUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return BatchNormTestImpl<uint8_t>(workloadFactory, 1.f/20.f, 50);
}

LayerTestResult<uint8_t, 4> ConstantUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<uint8_t>(workloadFactory, 2e-6f, 1);
}

LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation1dTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim2TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim2DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
                                                                 bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                                        bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<uint8_t>(workloadFactory, forceNoPadding, 3.0f, -5);
}

LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory& workloadFactory,
                                                                 bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<float>(workloadFactory, forceNoPadding);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                                        bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<uint8_t>(workloadFactory, forceNoPadding, 0.1f, 128);
}

LayerTestResult<float, 4> SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleAveragePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<float, 4> SimpleAveragePooling2dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleAveragePooling2dNhwcTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory, 0.5, -1);
}

LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
                                                                            bool forceNoPadding)
{
    return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
}

LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return LargeTensorsAveragePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return LargeTensorsAveragePooling2dTestCommon<uint8_t>(workloadFactory, 0.5, -1);
}

LayerTestResult<float, 4> SimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleL2Pooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride1TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride1TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride4TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride4TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize7Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize7TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize7TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize9Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize9TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize9TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AsymmetricNonSquarePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AsymmetricNonSquarePooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> ComparePooling2dTest(armnn::IWorkloadFactory& workloadFactory,
                                               armnn::IWorkloadFactory& refWorkloadFactory,
                                               armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<float>(workloadFactory, refWorkloadFactory, poolingType);
}

LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                      armnn::IWorkloadFactory& refWorkloadFactory,
                                                      armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<uint8_t>(workloadFactory, refWorkloadFactory, poolingType, 0.1f, 128);
}

LayerTestResult<float, 2> FullyConnectedLargeTest(armnn::IWorkloadFactory& workloadFactory,
                                                  bool transposeWeights)
{
    return FullyConnectedLargeTestCommon<float>(workloadFactory, transposeWeights);
}

LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, 1.0f, -5);
}

LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<uint8_t>(workloadFactory, 1.0f, -5);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> SimplePermuteFloat32Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimplePermuteFloat32TestCommon(workloadFactory);
}

LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimplePermuteUint8TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet1TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet2TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet3TestCommon(workloadFactory);
}

namespace
{
template <typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> MeanTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                             const unsigned int* inputShape,
                                             const std::vector<T>& inputData,
                                             const std::vector<unsigned int>& axis,
                                             bool keepDims,
                                             const unsigned int* outputShape,
                                             const std::vector<T>& outputData,
                                             float scale = 1.0f,
                                             int32_t offset = 0)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MeanQueueDescriptor data;
    data.m_Parameters.m_Axis = axis;
    data.m_Parameters.m_KeepDims = keepDims;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());

    return result;
}
} // anonymous namespace
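
// In the tests below, m_Axis lists the dimensions to reduce over (an empty list reduces over
// all of them) and m_KeepDims controls whether the reduced dimensions are kept with size 1
// or dropped from the output shape - e.g. reducing { 1, 1, 3, 2 } over axis 2 yields
// { 1, 1, 2 } without keepDims and { 1, 1, 1, 2 } with it.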

LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 3, 2 };
    const unsigned int outputShape[] = { 1 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2 });

    return MeanTestHelper<uint8_t, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
}

LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 2 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2, 2 });

    return MeanTestHelper<uint8_t, 4, 3>(workloadFactory, inputShape, input, {2}, false, outputShape, output);
}

LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2, 2 });

    return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, {2}, true, outputShape, output);
}

LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 1, 3, 1, 1 };

    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
    std::vector<uint8_t> output({ 1, 3, 5 });

    return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, {0, 3}, true, outputShape, output);
}

LayerTestResult<uint8_t, 1> MeanVtsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
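    // Because the input and output share the same scale/offset and the mean is an affine
    // reduction, the expected quantized outputs are simply the means of the quantized
    // inputs: the two output channels average to 12 and 13 respectively.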
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 2 };

    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                                 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 });
    std::vector<uint8_t> output({ 12, 13 });

    return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, inputShape, input, {0, 1}, false, outputShape,
                                         output, 0.8f, 5);
}

LayerTestResult<float, 1> MeanFloatSimpleTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 3, 2 };
    const unsigned int outputShape[] = { 1 };

    std::vector<float> input({ 1., 1., 2., 2., 3., 3. });
    std::vector<float> output({ 2. });

    return MeanTestHelper<float, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
}

LayerTestResult<float, 3> MeanFloatSimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 3, 1, 2 };

    std::vector<float> input({ 1., 2., 3., 4., 5., 6., 1., 2., 3., 4., 5., 6. });
    std::vector<float> output({ 1., 2., 3., 4., 5., 6. });

    return MeanTestHelper<float, 4, 3>(workloadFactory, inputShape, input, {0}, false, outputShape, output);
}

LayerTestResult<float, 4> MeanFloatKeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };

    std::vector<float> input({ 1., 1., 2., 2., 3., 3. });
    std::vector<float> output({ 2., 2. });

    return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, {2}, true, outputShape, output);
}

LayerTestResult<float, 4> MeanFloatMultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 1, 3, 1, 1 };

    std::vector<float> input({ 1., 2., 3., 4., 5., 6., 1., 2., 3., 4., 5., 6. });
    std::vector<float> output({ 1.5, 3.5, 5.5 });

    return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, {0, 3}, true, outputShape, output);
}

LayerTestResult<float, 1> MeanVtsFloat1Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 2 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
                               13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
    std::vector<float> output({ 12.0f, 13.0f });

    return MeanTestHelper<float, 3, 1>(workloadFactory, inputShape, input, {0, 1}, false, outputShape, output);
}

LayerTestResult<float, 3> MeanVtsFloat2Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 1, 3, 1 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
                               13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
    std::vector<float> output({ 10.5f, 12.5f, 14.5f });

    return MeanTestHelper<float, 3, 3>(workloadFactory, inputShape, input, {0, 2}, true, outputShape, output);
}

LayerTestResult<float, 4> AdditionAfterMaxPoolTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Create the initial tensor:
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<float>());
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<float>());

    boost::multi_array<float, 4> poolingInput = MakeTensor<float, 4>(poolingInputTensorInfo,
                                                                     {1, 2, 3,
                                                                      4, 5, 6,
                                                                      7, 8, 9
                                                                     });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply a MaxPool with poolSize = 1x1 and stride = 2x2, which samples every other element:
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool workload.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    // Create an addition with a second tensor of the same size as the pooling output.
    // This would be the result of applying a Conv2d with kernel ones(2) and stride 1x1
    // to the initial tensor:
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<float>());
    armnn::TensorInfo addOutputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<float>());

    boost::multi_array<float, 4> addInput = MakeTensor<float, 4>(addInputTensorInfo,
                                                                 {12, 16,
                                                                  24, 28
                                                                 });

    // Expected output tensor after MaxPool and addition:
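    // 13 = 1 + 12, 19 = 3 + 16, 31 = 7 + 24, 37 = 9 + 28.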
    LayerTestResult<float, 4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
    {
        13, 19,
        31, 37
    }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor.
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}