//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "LayerTests.hpp"

#include "test/TensorHelpers.hpp"
#include "TensorCopyUtils.hpp"
#include "Permute.hpp"

#include <boost/test/unit_test.hpp>
#include <boost/assert.hpp>

#include <armnn/LayerSupport.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <algorithm>
#include <boost/cast.hpp>

#include "WorkloadTestUtils.hpp"
#include "Conv2dTestImpl.hpp"
#include "BatchNormTestImpl.hpp"
#include "ActivationTestImpl.hpp"
#include "Pooling2dTestImpl.hpp"
#include "ReshapeTestImpl.hpp"
#include "FullyConnectedTestImpl.hpp"
#include "SpaceToBatchNdTestImpl.hpp"
#include "SplitterTestImpl.hpp"
#include "SoftmaxTestImpl.hpp"
#include "NormTestImpl.hpp"
#include "PermuteTestImpl.hpp"
#include "LstmTestImpl.hpp"
#include "ConvertFp16ToFp32TestImpl.hpp"
#include "ConvertFp32ToFp16TestImpl.hpp"

// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});

// 2-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias2({0, 2});

armnn::TensorShape GetTestTensorShape(unsigned int numberOfBatches,
                                      unsigned int numberOfChannels,
                                      unsigned int height,
                                      unsigned int width,
                                      const armnn::DataLayoutIndexed& dataLayout)
{
    switch (dataLayout.GetDataLayout())
    {
        case armnn::DataLayout::NCHW:
            return armnn::TensorShape({numberOfBatches, numberOfChannels, height, width});
        case armnn::DataLayout::NHWC:
            return armnn::TensorShape({numberOfBatches, height, width, numberOfChannels});
        default:
            throw armnn::InvalidArgumentException("unknown data layout ["
                + std::to_string(static_cast<int>(dataLayout.GetDataLayout())) + "]");
    }
}
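
// For example, given (numberOfBatches, numberOfChannels, height, width) = (1, 3, 8, 16):
//     NCHW -> TensorShape({1, 3, 8, 16})
//     NHWC -> TensorShape({1, 8, 16, 3})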

// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
template<typename T>
boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
{
    if (biasEnabled)
    {
        armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, armnn::GetDataType<T>());
        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
        return bias;
    }
    else
    {
        return boost::multi_array<T, 1>();
    }
}
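
// A usage sketch (the quantization itself is handled by QuantizedVector; the exact
// rounding is an implementation detail of the test helpers):
//     GetBias2<float>(true, 0.f, 0)     -> 2-element bias holding {0, 2}
//     GetBias2<int32_t>(true, 0.5f, 50) -> the same values, quantized for the uint8 tests
//     GetBias2<float>(false, 0.f, 0)    -> empty multi_array, i.e. no bias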

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       float qScale,
                                                       int32_t qOffset,
                                                       bool biasEnabled,
                                                       const armnn::DataLayoutIndexed& layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x4 image.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout);
}
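
// Sanity check on the expected shape above: an unpadded stride-1 convolution of the
// 16x8 input with a 3x5 (width x height) kernel yields (16 - 3 + 1) x (8 - 5 + 1) = 14x4,
// and the 2-kernel batch produces the 2 output channels.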

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       float qScale,
                                                       int32_t qOffset,
                                                       bool biasEnabled,
                                                       const armnn::DataLayoutIndexed& layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout);
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                           float qScale,
                                                           int32_t qOffset,
                                                           bool biasEnabled,
                                                           armnn::DataLayout dataLayout)
{
    // Use a single-batch 1-channel image of 3 rows by 4 columns, in NHWC layout.
    armnn::TensorInfo inputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
    {
        1, 5, 2, 3,
        8, 7, 3, 6,
        3, 3, 9, 1
    });

    // Use a single 1-channel 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
        4, 5, 6,
        0, 0, 0,
        3, 2, 1
    });

    // Expected output is 1 batch of a 1-channel image of the same size as the input.
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());

    const std::vector<float> outputData =
    {
        23, 41, 33, 21,
        44, 65, 76, 52,
        82, 85, 79, 42
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    return SimpleConvolution2dNhwcTestImpl<T>(workloadFactory,
                                              input,
                                              kernel,
                                              boost::multi_array<T, 1>(),
                                              expectedOutput,
                                              dataLayout,
                                              qScale,
                                              qOffset);
}
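
// Sanity check on the first expected value above (assuming one pixel of zero padding
// and cross-correlation semantics): at output (0, 0) the kernel's top row overlaps only
// padding and its middle row is all zeros, so
//     output(0, 0) = 2*8 + 1*7 = 23,
// which matches the first element of outputData.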

LayerTestResult<float, 4> SimpleConvolution2d3x5Test(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled,
                                                     const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2d3x5TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, layout);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled,
                                                            const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2d3x5TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled, layout);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3Test(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled,
                                                     const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2d3x3TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, layout);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(armnn::IWorkloadFactory& workloadFactory,
                                                         bool biasEnabled)
{
    return SimpleConvolution2d3x3NhwcTestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled,
                                                            const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2d3x3TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled, layout);
}

template<typename T>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::DataLayoutIndexed& layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11, 21, 31,
            12, 22, 32,
            13, 23, 33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11, -21,
            -12, -22,
        })));

    // Expected output is 1 batch of a 1-channel 6x8 image.
    // Manually calculated like this:
    //[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
    //[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
    //[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
    //[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
    //[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
    //[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
    //[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            0, 0, 0, 0, 0, 0,
            -242, -594, -934, -372, 0, 0,
            -495, -1190, -1850, -725, 0, 0,
            -538, -1256, -1916, -748, 0, 0,
            -273, -626, -946, -363, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout,
                                          1,  // Padding left.
                                          2,  // Padding top.
                                          3,  // Padding right.
                                          4); // Padding bottom.
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                     const armnn::DataLayoutIndexed& layout,
                                                                     float qScale,
                                                                     int32_t qOffset)
{
    // Use a single-batch 1-channel 5x5 image as input.
    armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11, 21, 31, 41, 51,
            12, 22, 32, 42, 52,
            13, 23, 33, 43, 53,
            14, 24, 34, 44, 54,
            15, 25, 35, 45, 55,
        })));

    // Use 1 batch of a 1-channel 4x4 kernel.
    armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11, -21, -31, -41,
            -12, -22, -32, -42,
            -13, -23, -33, -43,
            -14, -24, -34, -44,
        })));

    // Expected output is 1 batch of a 1-channel 5x5 image.
    armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -7140, -10580, -13940, -9300, -5230,
            -9590, -14120, -18520, -12290, -6860,
            -9980, -14560, -18960, -12560, -7000,
            -7518, -10904, -14144, -9318, -5152,
            -5032, -7256, -9376, -6142, -3368,
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout,
                                          1,  // Padding left.
                                          1,  // Padding top.
                                          2,  // Padding right.
                                          2); // Padding bottom.
}
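
// Sanity check on the output shape above: with stride 1,
//     outW = inW + padLeft + padRight - kernelW + 1 = 5 + 1 + 2 - 4 + 1 = 5,
//     outH = inH + padTop + padBottom - kernelH + 1 = 5 + 1 + 2 - 4 + 1 = 5,
// so the asymmetric padding preserves the 5x5 size.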

template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                 float qScale,
                                                                 int32_t qOffset,
                                                                 bool biasEnabled,
                                                                 const armnn::DataLayoutIndexed& layout)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the Python TensorFlow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dAsymmetricTestImpl<T>(workloadFactory,
                                                       input,
                                                       kernel,
                                                       GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                                       expectedOutput,
                                                       qScale,
                                                       qOffset,
                                                       layout,
                                                       1,  // Padding left.
                                                       1,  // Padding top.
                                                       2,  // Padding right.
                                                       2,  // Padding bottom.
                                                       1,  // strideX
                                                       1); // strideY
}
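
// Sanity check on the first expected value above (assuming cross-correlation semantics):
// with padding (1, 1), only the lower-right 3x3 of the channel-0 kernel overlaps the
// image at output (0, 0), so
//     output(0, 0) = 27*0 + 26*1 + 25*2 + 23*5 + 22*6 + 21*7 + 19*10 + 18*11 + 17*12 = 1062.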

template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                           float qScale,
                                                           int32_t qOffset,
                                                           bool biasEnabled)
{
    armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
             0, 25,
             1, 26,
             2, 27,
             3, 28,
             4, 29,

             5, 30,
             6, 31,
             7, 32,
             8, 33,
             9, 34,

            10, 35,
            11, 36,
            12, 37,
            13, 38,
            14, 39,

            15, 40,
            16, 41,
            17, 42,
            18, 43,
            19, 44,

            20, 45,
            21, 46,
            22, 47,
            23, 48,
            24, 49
        })));

    armnn::TensorInfo kernelTensorInfo({ 1, 4, 4, 2 }, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 16,
            31, 15,
            30, 14,
            29, 13,

            28, 12,
            27, 11,
            26, 10,
            25,  9,

            24,  8,
            23,  7,
            22,  6,
            21,  5,

            20,  4,
            19,  3,
            18,  2,
            17,  1
        })));

    armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1550,
            1580, 2284,
            1850, 2362,
            1530, 1955,
            1117, 1428,

            2140, 2910,
            3108, 4206,
            3500, 4342,
            2842, 3528,
            2042, 2536,

            3580, 3390,
            5068, 4886,
            5460, 5022,
            4342, 4068,
            3062, 2916,

            3618, 3566,
            5072, 5056,
            5390, 5182,
            4248, 4133,
            2971, 2922,

            3074, 3100,
            4282, 4352,
            4510, 4452,
            3533, 3517,
            2457, 2465
        })));

    return DepthwiseConvolution2dNhwcTestImpl<T>(workloadFactory,
                                                 input,
                                                 kernel,
                                                 GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                                 expectedOutput,
                                                 qScale,
                                                 qOffset,
                                                 1,  // Padding left.
                                                 1,  // Padding top.
                                                 2,  // Padding right.
                                                 2,  // Padding bottom.
                                                 1,  // strideX
                                                 1); // strideY
}
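
// Note: the expected values above are the same numbers as in the NCHW asymmetric test,
// just rearranged; NHWC interleaves the two channels per pixel instead of storing them
// as separate planes.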

LayerTestResult<float, 4>
Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(armnn::IWorkloadFactory& workloadFactory,
                                                           const armnn::DataLayoutIndexed& layout)
{
    return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon<float>(workloadFactory, layout, 0.0f, 0);
}

LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(armnn::IWorkloadFactory& workloadFactory,
                                                             const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2dAsymmetricPaddingTestCommon<float>(workloadFactory, layout, 0.0f, 0);
}

LayerTestResult<float, 4> DepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled,
                                                     const armnn::DataLayoutIndexed& layout)
{
    return DepthwiseConvolution2dTestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled, layout);
}

LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(armnn::IWorkloadFactory& workloadFactory,
                                                              bool biasEnabled)
{
    return DepthwiseConvolution2dNhwcTestCommon<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(armnn::IWorkloadFactory& workloadFactory,
                                                              bool biasEnabled,
                                                              const armnn::DataLayoutIndexed& layout)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled, layout);
}

LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(armnn::IWorkloadFactory& workloadFactory,
                                                               bool biasEnabled,
                                                               const armnn::DataLayoutIndexed& layout)
{
    return DepthwiseConvolution2dAsymmetricTestCommon<float>(workloadFactory, 0.0f, 0, biasEnabled, layout);
}

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled,
                                                            const armnn::DataLayoutIndexed& layout)
{
    return DepthwiseConvolution2dTestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled, layout);
}

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                                     bool biasEnabled,
                                                                     const armnn::DataLayoutIndexed& layout)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled, layout);
}

LayerTestResult<float, 4> Convolution1dTest(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
{
    return Convolution1dTestImpl<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> Convolution1dUint8Test(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
{
    return Convolution1dTestImpl<uint8_t>(workloadFactory, 0.1f, 128, biasEnabled);
}

LayerTestResult<float, 4> CompareConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                   armnn::IWorkloadFactory& refWorkloadFactory)
{
    return CompareConvolution2dTestImpl<float>(workloadFactory, refWorkloadFactory);
}

template<typename T>
LayerTestResult<T, 4> CompareDepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                        armnn::IWorkloadFactory& refWorkloadFactory,
                                                        const armnn::DataLayoutIndexed& layout)
{
    return CompareDepthwiseConvolution2dTestImpl<T>(workloadFactory, refWorkloadFactory, layout);
}

template LayerTestResult<float, 4> CompareDepthwiseConvolution2dTest<float>(
    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&, const armnn::DataLayoutIndexed&);
template LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dTest<uint8_t>(
    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&, const armnn::DataLayoutIndexed&);

LayerTestResult<float, 4> SimpleNormalizationAcrossTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float, 4> SimpleNormalizationWithinTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float, 4> SimpleNormalizationAcrossNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationNhwcTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float, 2> SimpleSoftmaxTest(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    return SimpleSoftmaxTestImpl<float>(workloadFactory, beta);
}

LayerTestResult<uint8_t, 2> SimpleSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    return SimpleSoftmaxTestImpl<uint8_t>(workloadFactory, beta);
}

LayerTestResult<float, 4> CompareNormalizationTest(armnn::IWorkloadFactory& workloadFactory,
                                                   armnn::IWorkloadFactory& refWorkloadFactory,
                                                   armnn::NormalizationAlgorithmChannel normChannel,
                                                   armnn::NormalizationAlgorithmMethod normMethod)
{
    return CompareNormalizationTestImpl(workloadFactory, refWorkloadFactory, normChannel, normMethod);
}

LayerTestResult<float, 2> CompareSoftmaxTest(armnn::IWorkloadFactory& workloadFactory,
                                             armnn::IWorkloadFactory& refWorkloadFactory,
                                             float beta)
{
    return CompareSoftmaxTestImpl<float>(workloadFactory, refWorkloadFactory, beta);
}

LayerTestResult<uint8_t, 2> CompareSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                    armnn::IWorkloadFactory& refWorkloadFactory,
                                                    float beta)
{
    return CompareSoftmaxTestImpl<uint8_t>(workloadFactory, refWorkloadFactory, beta);
}

std::vector<LayerTestResult<float, 3>> SplitterTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SplitterTestCommon<float>(workloadFactory);
}

std::vector<LayerTestResult<uint8_t, 3>> SplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SplitterTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<float, 3> CopyViaSplitterTest(armnn::IWorkloadFactory& workloadFactory)
{
    return CopyViaSplitterTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return CopyViaSplitterTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({ 2, 2 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        { 2., 3., 3., 4. }));

    armnn::TensorInfo outputDesc({ 2, 4 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
         -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
         0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
         -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
         -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
         0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
         -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
         0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
         0.02168f}));
    return LstmLayerFloat32NoCifgWithPeepholeWithProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({2, 2}, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        {2., 3., 3., 4.}));

    armnn::TensorInfo outputDesc({2, 4}, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
         -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 3> MergerTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float, 3> ret(outputTensorInfo);

    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
        {
             1.0f,  2.0f,  3.0f,
             4.0f,  5.0f,  6.0f,
             7.0f,  8.0f,  9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,

            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
        {
             1.0f,  2.0f,  3.0f,
             4.0f,  5.0f,  6.0f,
             7.0f,  8.0f,  9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,
        })
    );

    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
        {
            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; // Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
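
    // input1 fills channels 0-1 of the 3-channel output, so input2's view origin starts
    // at channel 2 (the first coordinate of wOrigin2).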

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}

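// AdditionTest and the other element-wise tests below follow the same workload pattern
// as MergerTest above: create tensor handles from the factory, describe the inputs and
// outputs in a queue descriptor, create the workload, allocate and fill the handles,
// Execute(), then copy the result back for comparison.
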
LayerTestResult<float, 4> AdditionTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 2;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
        {
            0.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            1.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 1.0f, 2.0f,

            0.0f, 0.0f, 1.0f,
            0.2f, 1.0f, 2.0f,
        }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
        {
            1.0f, 2.0f, 1.0f,
            0.0f, 1.0f, 2.0f,

            1.0f, 2.0f, -2.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 0.0f, -3.0f,

            0.0f, 0.0f, 1.0f,
            0.7f, 1.0f, 5.0f,
        }));

    LayerTestResult<float, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 4.0f, 2.0f,
            0.2f, 2.0f, 4.0f,

            2.0f, 4.0f, -1.0f,
            0.4f, 2.0f, 4.0f,

            0.0f, 4.0f, 2.0f,
            8.4f, 1.0f, -1.0f,

            0.0f, 0.0f, 2.0f,
            0.9f, 2.0f, 7.0f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template <typename T>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                float qScale,
                                                int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
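
    // Broadcasting: each size-1 dimension is stretched to match the other operand,
    // so adding a {1, 3, 2, 1} tensor to a {1, 1, 2, 3} tensor yields {1, 3, 2, 3}.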

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template <typename T>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                        float qScale,
                                                        int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
             0.0f,  1.0f,  2.0f,
             3.0f,  4.0f,  5.0f,
             6.0f,  7.0f,  8.0f,
             9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
             0.5f,  1.5f,  2.5f,
             3.5f,  4.5f,  5.5f,
             6.5f,  7.5f,  8.5f,
             9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<float, 4> AdditionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcastTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcastTestImpl<uint8_t>(workloadFactory, 2.f, 0);
}

LayerTestResult<float, 4> AdditionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcast1ElementTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcast1ElementTestImpl<uint8_t>(workloadFactory, 0.1333333f, 128);
}

LayerTestResult<float, 4> CompareAdditionTest(armnn::IWorkloadFactory& workloadFactory,
                                              armnn::IWorkloadFactory& refWorkloadFactory)
{
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}

namespace {
template <typename T>
LayerTestResult<T, 4> DivisionTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                         const unsigned int shape0[4],
                                         const std::vector<T>& values0,
                                         float scale0,
                                         int32_t offset0,
                                         const unsigned int shape1[4],
                                         const std::vector<T>& values1,
                                         float scale1,
                                         int32_t offset1,
                                         const unsigned int outShape[4],
                                         const std::vector<T>& outValues,
                                         float outScale,
                                         int32_t outOffset)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DivisionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
1347
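// Exercises IEEE 754 float division semantics: dividing a non-zero value by a (signed)
// zero yields a correspondingly signed infinity, and 0/0 yields NaN.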
LayerTestResult<float,4> DivisionByZeroTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<float> input0({
        1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
        -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });

    std::vector<float> input1({
        0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
        0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });

    std::vector<float> output({
        INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
        -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });

    return DivisionTestHelper<float>(workloadFactory,
                                     shape, input0, 1.0f, 0,
                                     shape, input1, 1.0f, 0,
                                     shape, output, 1.0f, 0);
}

LayerTestResult<float,4> DivisionTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<float> input0({
        2, 2, 2, 2, 3, 3, 3, 3,
        4, 4, 4, 4, 5, 5, 5, 5 });

    std::vector<float> input1({
        1, 1, 1, 1, 2, 2, 2, 2,
        4, 4, 4, 4, 4, 4, 4, 4 });

    std::vector<float> output({
        2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
        1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });

    return DivisionTestHelper<float>(workloadFactory,
                                     shape, input0, 1.0f, 0,
                                     shape, input1, 1.0f, 0,
                                     shape, output, 1.0f, 0);
}

LayerTestResult<float, 4> DivisionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 2, 2, 2 };
    std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16 });

    unsigned int shape1[] = { 1, 1, 1, 1 };
    std::vector<float> input1({ 2 });

    std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8 });

    return DivisionTestHelper<float>(workloadFactory,
                                     shape0, input0, 1.0f, 0,
                                     shape1, input1, 1.0f, 0,
                                     shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 3, 3, 2 };
    std::vector<float> input0({
        1, 4, 3, 8, 5, 12,
        7, 16, 9, 20, 11, 24,
        13, 28, 15, 32, 17, 36 });

    unsigned int shape1[] = { 1, 1, 1, 2 };
    std::vector<float> input1({ 1, 2 });

    std::vector<float> output({
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12,
        13, 14, 15, 16, 17, 18 });

    return DivisionTestHelper<float>(workloadFactory,
                                     shape0, input0, 1.0f, 0,
                                     shape1, input1, 1.0f, 0,
                                     shape0, output, 1.0f, 0);
}

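// The inputs below use scale 1.0 while the output uses scale 0.25, so the real results
// 2.0, 1.5, 1.0 and 1.25 are stored as round(r / 0.25), i.e. 8, 6, 4 and 5 respectively.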
LayerTestResult<uint8_t,4> DivisionUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<uint8_t> input0({ 2, 2, 2, 2, 3, 3, 3, 3,
                                  4, 4, 4, 4, 5, 5, 5, 5 });

    std::vector<uint8_t> input1({ 1, 1, 1, 1, 2, 2, 2, 2,
                                  4, 4, 4, 4, 4, 4, 4, 4 });

    std::vector<uint8_t> output({ 8, 8, 8, 8, 6, 6, 6, 6,
                                  4, 4, 4, 4, 5, 5, 5, 5 });

    return DivisionTestHelper<uint8_t>(workloadFactory,
                                       shape, input0, 1.0f, 0,
                                       shape, input1, 1.0f, 0,
                                       shape, output, 0.25f, 0);
}

LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 2, 2, 2 };
    std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16 });

    unsigned int shape1[] = { 1, 1, 1, 1 };
    std::vector<uint8_t> input1({ 2 });

    std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8 });

    return DivisionTestHelper<uint8_t>(workloadFactory,
                                       shape0, input0, 1.0f, 0,
                                       shape1, input1, 1.0f, 0,
                                       shape0, output, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 3, 3, 2 };
    std::vector<uint8_t> input0({ 1, 4, 3, 8, 5, 12,
                                  7, 16, 9, 20, 11, 24,
                                  13, 28, 15, 32, 17, 36 });

    unsigned int shape1[] = { 1, 1, 1, 2 };
    std::vector<uint8_t> input1({ 1, 2 });

    std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6,
                                  7, 8, 9, 10, 11, 12,
                                  13, 14, 15, 16, 17, 18 });

    return DivisionTestHelper<uint8_t>(workloadFactory,
                                       shape0, input0, 1.0f, 0,
                                       shape1, input1, 1.0f, 0,
                                       shape0, output, 1.0f, 0);
}

namespace {
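// Float32 counterpart of DivisionTestHelper: runs a Multiplication workload over two
// inputs and returns the actual output alongside the expected one for comparison.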
LayerTestResult<float,4> MultiplicationTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                                  const unsigned int shape0[4],
                                                  const std::vector<float>& values0,
                                                  const unsigned int shape1[4],
                                                  const std::vector<float>& values1,
                                                  const unsigned int outShape[4],
                                                  const std::vector<float>& outValues)
{
    const size_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};

    auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
    return ret;
}
} // anonymous namespace

LayerTestResult<float,4> MultiplicationTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<float> input0({
        1, 1, 1, 1, 2, 2, 2, 2,
        3, 3, 3, 3, 4, 4, 4, 4 });

    std::vector<float> input1({
        2, 2, 2, 2, 3, 3, 3, 3,
        4, 4, 4, 4, 5, 5, 5, 5 });

    std::vector<float> output({
        2, 2, 2, 2, 6, 6, 6, 6,
        12, 12, 12, 12, 20, 20, 20, 20 });

    return MultiplicationTestHelper(workloadFactory,
                                    shape,
                                    input0,
                                    shape,
                                    input1,
                                    shape,
                                    output);
}

LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 2, 2, 2 };
    std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8 });

    unsigned int shape1[] = { 1, 1, 1, 1 };
    std::vector<float> input1({ 2 });

    std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16 });

    return MultiplicationTestHelper(workloadFactory,
                                    shape0,
                                    input0,
                                    shape1,
                                    input1,
                                    shape0,
                                    output);
}

LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 3, 3, 2 };
    std::vector<float> input0({
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12,
        13, 14, 15, 16, 17, 18 });

    unsigned int shape1[] = { 1, 1, 1, 2 };
    std::vector<float> input1({ 1, 2 });

    std::vector<float> output({
        1, 4, 3, 8, 5, 12,
        7, 16, 9, 20, 11, 24,
        13, 28, 15, 32, 17, 36 });

    return MultiplicationTestHelper(workloadFactory,
                                    shape0,
                                    input0,
                                    shape1,
                                    input1,
                                    shape0,
                                    output);
}

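// Runs the same randomly generated multiplication through two workload factories
// (e.g. an accelerated backend under test and the reference backend) and returns
// both outputs so the caller can compare them.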
LayerTestResult<float,4> CompareMultiplicationTest(armnn::IWorkloadFactory& workloadFactory,
                                                   armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}

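// Cross-backend comparison for batch normalization. Note the variance tensor is
// created with an extra 0.0f argument to MakeRandomTensor, presumably a lower bound,
// since a valid variance must be non-negative.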
LayerTestResult<float,4> CompareBatchNormTest(armnn::IWorkloadFactory& workloadFactory,
                                              armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}

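// Runs a Permute workload over inputData using the given mappings, fills outputData
// with the permuted values, and updates inputTensorInfo to the permuted shape so
// callers can chain further operations on the transformed tensor.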
template<typename T>
void PermuteTensorData(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::PermutationVector& mappings,
        armnn::TensorInfo & inputTensorInfo,
        const T * inputData,
        std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    inputTensorInfo = outputTensorInfo;
}

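// Convenience overload: collects the input shapes and builds an OriginsDescriptor
// recording where each input view begins inside the concatenated output.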
armnn::OriginsDescriptor CreateMergerDescriptorForConcatenation(
        const std::vector<armnn::TensorInfo> & inputTensorInfos,
        unsigned int concatDim)
{
    std::vector<armnn::TensorShape> shapes;
    shapes.reserve(inputTensorInfos.size());
    for (const armnn::TensorInfo& it: inputTensorInfos)
    {
        shapes.push_back(it.GetShape());
    }

    return armnn::CreateMergerDescriptorForConcatenation(shapes.begin(),
                                                         shapes.end(),
                                                         concatDim);
}

//
// Concatenation is only supported for N and C dimensions for NCHW. In case of
// <4 dimensions we need to make sure that the concat dimension is at least the
// 3rd slowest iterating one.
//

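// For example, concatenating 2d tensors along dimension 1 gives
// nDimensions - concatDim = 1 (< 3), so a permute is needed, whereas concatenating
// 4d tensors along dimension 0 or 1 gives 4 or 3, and no permute is needed.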
bool NeedPermuteForConcat(
        const std::vector<armnn::TensorInfo> & inputTensorInfos,
        unsigned int concatDim)
{
    // See note above. Additionally we expect the input shapes to have the
    // same number of dimensions.
    unsigned int nDimensions = 0;

    // Determine the number of dimensions as well as sanity check them
    // against test implementation issues.
    for (auto && tensorInfo : inputTensorInfos)
    {
        if (!nDimensions)
        {
            nDimensions = tensorInfo.GetShape().GetNumDimensions();
        }
        else
        {
            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                "Input shapes must have the same number of dimensions");
        }
    }

    return (nDimensions - concatDim) < 3;
}

armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
{
    unsigned int numDims = inputShape.GetNumDimensions();
    if (numDims >= 3)
    {
        // Nothing to do if the inputShape has at least 3 dimensions.
        return inputShape;
    }

    std::vector<unsigned int> newDims(size_t(3), 1u);
    unsigned int expandedBy = 3 - numDims;
    for (unsigned int i = 0; i < numDims; ++i)
    {
        newDims[expandedBy + i] = inputShape[i];
    }
    return armnn::TensorShape(3u, &newDims[0]);
}

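// Chooses the 3d permutation (and its inverse) that moves the concat axis of the
// expanded shape into dimension 0, and rewrites concatDim to 0 accordingly. E.g. an
// axis that expands to position 2 uses the forward permutation {1, 2, 0}.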
void Generate3dPermuteVectorForConcat(
        unsigned int numDimensions,
        unsigned int & concatDim,
        std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
{
    BOOST_ASSERT_MSG(numDimensions <= 3,
        "Only dimensions 1,2 and 3 are supported by this helper");

    unsigned int expandedBy = 3 - numDimensions;
    unsigned int expandedConcatAxis = concatDim + expandedBy;

    if (expandedConcatAxis == 2)
    {
        concatDim = 0;
        armnn::PermutationVector forwardPermutation({1, 2, 0});
        armnn::PermutationVector reversePermutation({2, 0, 1});
        permutations = std::make_pair(forwardPermutation, reversePermutation);
    }
    else if (expandedConcatAxis == 1)
    {
        concatDim = 0;
        armnn::PermutationVector forwardPermutation({2, 0, 1});
        armnn::PermutationVector reversePermutation({1, 2, 0});
        permutations = std::make_pair(forwardPermutation, reversePermutation);
    }
    else
    {
        BOOST_ASSERT(expandedConcatAxis == 0);
        concatDim = 0;
    }
}

//
// Permutes the input tensors so we can do a supported concatenation.
// Tensors with fewer than 3 dimensions are also treated as 3d by adding
// dummy 1 dimensions at the front. Finally this function computes the
// output shape of the permuted, concatenated tensor.
//
template <typename T>
void PermuteInputsForConcat(
        armnn::IWorkloadFactory& workloadFactory,
        std::vector<armnn::TensorInfo> & inputTensorInfos,
        std::vector<T *> & inputData,
        std::vector<std::vector<T>> & inputDataStorage,
        armnn::PermutationVector & permuteVector,
        unsigned int & concatDim,
        armnn::TensorInfo & outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);
            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}

//
// This is the counterpart of PermuteInputsForConcat(...): it permutes the
// output of the concatenation back so we can check it against an expected
// output.
//
template <typename T>
void PermuteOutputForConcat(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::TensorInfo & tensorInfo,
        const armnn::PermutationVector & permuteVector,
        std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
        T * data)
{
    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
    if (data == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo resultTensorInfo = tensorInfo;
    std::vector<T> inputData(tensorInfo.GetNumElements());
    std::vector<T> outputData;

    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());

    PermuteTensorData<T>(workloadFactory,
                         permuteVector,
                         resultTensorInfo,
                         &inputData[0],
                         outputData);

    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
}

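// Concatenates the given inputs along concatDim into output. If the requested axis
// is not directly supported, the inputs are permuted first and the result is permuted
// back afterwards, so callers can concatenate along any axis.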
template <typename T>
void Concatenate(armnn::IWorkloadFactory& workloadFactory,
    std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
    std::initializer_list<T *> inputsOrig,
    const armnn::TensorInfo& outputTensorInfoOrig,
    T * output,
    unsigned int concatDim)
{
    BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
    if (output == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::MergerQueueDescriptor queueDescriptor;

    // Saves a copy of the parameters which we might need to change.
    std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
    std::vector<T *> inputs = inputsOrig;
    armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;

    armnn::PermutationVector permuteVector{0, 1, 2};

    // Holds and automatically releases memory for the reshaped input data.
    std::vector<std::vector<T>> tmpInputDataStorage;

    const size_t inputCount = inputTensorInfos.size();

    bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);

    if (needPermuteForConcat)
    {
        //
        // We need to permute the inputs, because concatenation along
        // the requested axis is not supported.
        //
        PermuteInputsForConcat<T>(workloadFactory,
                                  inputTensorInfos,
                                  inputs,
                                  tmpInputDataStorage,
                                  permuteVector,
                                  concatDim,
                                  outputTensorInfo);
    }

    armnn::OriginsDescriptor viewsDescriptor = CreateMergerDescriptorForConcatenation(inputTensorInfos, concatDim);

    queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
    for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
    {
        queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
            viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
    }

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
    inputHandles.reserve(inputCount);

    const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
    for (unsigned int i = 0; i < inputCount; ++i)
    {
        const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];

        std::unique_ptr<armnn::ITensorHandle> inputHandle = subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo.GetShape(),
                queueDescriptor.m_ViewOrigins[i].m_Origin.data())
            : workloadFactory.CreateTensorHandle(inputTensorInfo);

        inputHandles.emplace_back(std::move(inputHandle));
    }

    armnn::WorkloadInfo workloadInfo;

    for (unsigned int i = 0; i < inputCount; ++i)
    {
        AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
    }

    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(queueDescriptor, workloadInfo);

    for (auto& inputHandle : inputHandles)
    {
        inputHandle->Allocate();
    }

    outputHandle->Allocate();

    unsigned int nextInputId = 0;
    for (auto& inputHandle : inputHandles)
    {
        CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
        ++nextInputId;
    }

    workload->Execute();

    if (needPermuteForConcat)
    {
        PermuteOutputForConcat<T>(workloadFactory,
                                  outputTensorInfo,
                                  permuteVector,
                                  std::move(outputHandle),
                                  output);
    }
    else
    {
        CopyDataFromITensorHandle(output, outputHandle.get());
    }
}

template <typename T>
LayerTestResult<T, 1> Concatenation1dTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 3 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
    auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
    auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));

    armnn::TensorInfo outputTensorInfo({ 9 }, armnn::GetDataType<T>());

    LayerTestResult<T, 1> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { inputTensorInfo, inputTensorInfo, inputTensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        0);

    result.output = MakeTensor<T, 1>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
    }));

    return result;
}

LayerTestResult<float, 1> Concatenation1dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation1dTestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 2> Concatenation2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                              const armnn::TensorInfo& outputTensorInfo,
                                              unsigned int dimension,
                                              const float qScale,
                                              const int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,
    }));

    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        7.0f, 8.0f, 9.0f,

        // Batch 1
        16.0f, 17.0f, 18.0f,
    }));

    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { inputTensorInfo, inputTensorInfo, inputTensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        dimension);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    return result;
}

template <typename T>
LayerTestResult<T, 2> Concatenation2dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                  float qScale, int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());

    LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 0, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}

LayerTestResult<float, 2> Concatenation2dDim0Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 2> Concatenation2dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                  float qScale, int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());

    LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
    }));

    return result;
}

LayerTestResult<float, 2> Concatenation2dDim1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
                                                               int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 3, 3 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,

        // Batch 2
        7.0f, 8.0f, 9.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 1, 3 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        16.0f, 17.0f, 18.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { input0TensorInfo, input1TensorInfo, input2TensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        0);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}

LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
                                                               int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 5 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f, 7.0f, 8.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        9.0f,

        // Batch 1
        18.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { input0TensorInfo, input1TensorInfo, input2TensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        1);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
    }));

    return result;
}

LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                              const armnn::TensorInfo& outputTensorInfo,
                                              unsigned int dimension,
                                              float qScale,
                                              int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 1, Channel 0
        25.0f, 26.0f,

        // Batch 1, Channel 1
        27.0f, 28.0f,

        // Batch 1, Channel 2
        29.0f, 30.0f
    }));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f
    }));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { inputTensorInfo, inputTensorInfo, inputTensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        dimension);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
                                                  int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 0,
                                                              qScale, qOffset);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));
    return result;
}

LayerTestResult<float, 3> Concatenation3dDim0Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                  float qScale, int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        13.0f, 14.0f,

        // Batch 0, Channel 7
        15.0f, 16.0f,

        // Batch 0, Channel 8
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 4
        27.0f, 28.0f,

        // Batch 1, Channel 5
        29.0f, 30.0f,

        // Batch 1, Channel 6
        31.0f, 32.0f,

        // Batch 1, Channel 7
        33.0f, 34.0f,

        // Batch 1, Channel 8
        35.0f, 36.0f
    }));

    return result;
}

LayerTestResult<float, 3> Concatenation3dDim1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim2TestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                  float qScale, int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 2, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
    }));

    return result;
}

LayerTestResult<float, 3> Concatenation3dDim2Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim2TestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
                                                               int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        25.0f, 26.0f,

        // Batch 0, Channel 1
        27.0f, 28.0f,

        // Batch 0, Channel 2
        29.0f, 30.0f,

        // Batch 1, Channel 0
        13.0f, 14.0f,

        // Batch 1, Channel 1
        15.0f, 16.0f,

        // Batch 1, Channel 2
        17.0f, 18.0f,

        // Batch 2, Channel 0
        31.0f, 32.0f,

        // Batch 2, Channel 1
        33.0f, 34.0f,

        // Batch 2, Channel 2
        35.0f, 36.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { input0TensorInfo, input1TensorInfo, input2TensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        0);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}

LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
                                                               int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 0, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 0
        27.0f, 28.0f,

        // Batch 1, Channel 1
        29.0f, 30.0f,

        // Batch 1, Channel 2
        13.0f, 14.0f,

        // Batch 1, Channel 3
        15.0f, 16.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { input0TensorInfo, input1TensorInfo, input2TensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        1);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        25.0f, 26.0f,

        // Batch 0, Channel 7
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        27.0f, 28.0f,

        // Batch 1, Channel 4
        29.0f, 30.0f,

        // Batch 1, Channel 5
        13.0f, 14.0f,

        // Batch 1, Channel 6
        15.0f, 16.0f,

        // Batch 1, Channel 7
        31.0f, 32.0f,
    }));

    return result;
}

LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
                                                               int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f,

        // Batch 0, Channel 1
        9.0f,

        // Batch 0, Channel 2
        11.0f,

        // Batch 1, Channel 0
        25.0f,

        // Batch 1, Channel 1
        27.0f,

        // Batch 1, Channel 2
        29.0f
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f, 55.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { input0TensorInfo, input1TensorInfo, input2TensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        2);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
    }));

    return result;
}

LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
}

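// 'Nop' resize: the output has the same shape as the input (2 channels of 4x4), so the
// bilinear resize is expected to reproduce the input unchanged.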
LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory,
                                                const armnn::DataLayoutIndexed& dataLayout)
{
    const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
    const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);

    std::vector<float> inputData({
        1.0f, 2.0f, 3.0f, 4.0f,
        2.0f, 3.0f, 4.0f, 5.0f,
        3.0f, 4.0f, 5.0f, 6.0f,
        4.0f, 5.0f, 6.0f, 7.0f,

        1.0f, 2.0f, 3.0f, 4.0f,
        2.0f, 3.0f, 4.0f, 5.0f,
        3.0f, 4.0f, 5.0f, 6.0f,
        4.0f, 5.0f, 6.0f, 7.0f
    });

3069 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
3070 {
3071 std::vector<float> tmp(inputData.size());
3072 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
3073 inputData = tmp;
3074 }
3075
3076 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00003077
3078 LayerTestResult<float, 4> result(outputTensorInfo);
3079 result.outputExpected = input;
3080
3081 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3082 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3083
3084 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003085 descriptor.m_Parameters.m_DataLayout = dataLayout;
3086 armnn::WorkloadInfo info;
3087 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3088 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3089
3090 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3091
3092 inputHandle->Allocate();
3093 outputHandle->Allocate();
3094 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3095
James Conroy074f3712018-10-03 09:32:03 +01003096 workload->Execute();
3097
3098 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3099 return result;
3100}
3101
LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory,
                                                   const armnn::DataLayoutIndexed& dataLayout)
{
    const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
    const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 1, 1, dataLayout);

    std::vector<float> inputData({
        1.0f, 255.0f,
        200.0f, 250.0f,

        250.0f, 200.0f,
        250.0f, 1.0f
    });

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel. Thus, for an input matrix of 2x2, we expect the output 1x1 matrix to contain, as
    // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
    // which we would expect if projecting the centre).
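    // Worked through for this data: the scale factor is inputSize / outputSize = 2 / 1 = 2,
    // so output texel (0,0) projects to input coordinate (0 * 2, 0 * 2) = (0,0) with zero
    // fractional weight, and each channel's single output is exactly input(0,0):
    // 1.0f for the first channel and 250.0f for the second.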

    std::vector<float> outputData({
        1.0f,

        250.0f
    });

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
        inputData = tmp;

        std::vector<float> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
        outputData = tmp1;
    }

    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory,
                                                  const armnn::DataLayoutIndexed& dataLayout)
{
    const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
    const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 2, 2, dataLayout);

    std::vector<float> inputData({
        1.0f, 2.0f, 3.0f, 4.0f,
        2.0f, 3.0f, 4.0f, 5.0f,
        3.0f, 4.0f, 5.0f, 6.0f,
        4.0f, 5.0f, 6.0f, 7.0f,

        7.0f, 6.0f, 5.0f, 4.0f,
        6.0f, 5.0f, 4.0f, 3.0f,
        5.0f, 4.0f, 3.0f, 2.0f,
        4.0f, 3.0f, 2.0f, 1.0f
    });

    std::vector<float> outputData({
        1.0f, 3.0f,
        3.0f, 5.0f,

        7.0f, 5.0f,
        5.0f, 3.0f
    });

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
        inputData = tmp;

        std::vector<float> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
        outputData = tmp1;
    }

    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory,
                                                const armnn::DataLayoutIndexed& dataLayout)
{
    const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
    const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 2, 3, dataLayout);

    std::vector<float> inputData({
        1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
        13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
        144.0f, 233.0f, 377.0f, 610.0f, 987.0f,

        987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
        89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
        8.0f, 5.0f, 3.0f, 2.0f, 1.0f
    });

    std::vector<float> outputData({
        1.0f, 2.6666f, 6.00f,
        78.5f, 179.3333f, 401.00f,

        987.0f, 454.6670f, 203.33f,
        48.5f, 22.3333f, 10.00f
    });

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
        inputData = tmp;

        std::vector<float> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
        outputData = tmp1;
    }

    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory,
                                                const armnn::DataLayoutIndexed& dataLayout)
{
    const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 3, 2, dataLayout);
    const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 3, 5, dataLayout);

    std::vector<float> inputData({
        1.0f, 2.0f,
        13.0f, 21.0f,
        144.0f, 233.0f,

        233.0f, 144.0f,
        21.0f, 13.0f,
        2.0f, 1.0f
    });

    std::vector<float> outputData({
        1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
        13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
        144.0f, 179.6f, 215.2f, 233.0f, 233.0f,

        233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
        21.0f, 17.8f, 14.6f, 13.0f, 13.0f,
        2.0f, 1.6f, 1.2f, 1.0f, 1.0f
    });

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
        inputData = tmp;

        std::vector<float> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
        outputData = tmp1;
    }

    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

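// A minimal, single-channel sketch of the coordinate scheme described above, which
// reproduces the expected values in the resize tests. For the magnification test,
// scaleW = 2/5 = 0.4, so output x = 1 maps to source x = 0.4 and the first row gives
// 1.0f * 0.6f + 2.0f * 0.4f = 1.4f. It is illustrative only and is not called by the
// tests; the helper name and its clamping behaviour are assumptions rather than part
// of the armnn API.
namespace
{

float ReferenceResizeBilinear(const std::vector<float>& input,
                              unsigned int inputWidth,
                              unsigned int inputHeight,
                              unsigned int outputWidth,
                              unsigned int outputHeight,
                              unsigned int outX,
                              unsigned int outY)
{
    // Project the top-left corner of the output texel into the input image.
    const float scaleW = static_cast<float>(inputWidth) / static_cast<float>(outputWidth);
    const float scaleH = static_cast<float>(inputHeight) / static_cast<float>(outputHeight);
    const float srcX = std::min(static_cast<float>(outX) * scaleW, static_cast<float>(inputWidth - 1));
    const float srcY = std::min(static_cast<float>(outY) * scaleH, static_cast<float>(inputHeight - 1));

    // Neighbouring texels and fractional interpolation weights.
    const unsigned int x0 = static_cast<unsigned int>(srcX);
    const unsigned int y0 = static_cast<unsigned int>(srcY);
    const unsigned int x1 = std::min(x0 + 1, inputWidth - 1);
    const unsigned int y1 = std::min(y0 + 1, inputHeight - 1);
    const float weightX = srcX - static_cast<float>(x0);
    const float weightY = srcY - static_cast<float>(y0);

    // Interpolate along the width, then along the height.
    const float top    = input[y0 * inputWidth + x0] * (1.0f - weightX) + input[y0 * inputWidth + x1] * weightX;
    const float bottom = input[y1 * inputWidth + x0] * (1.0f - weightX) + input[y1 * inputWidth + x1] * weightX;
    return top * (1.0f - weightY) + bottom * weightY;
}

} // anonymous namespace
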
LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int width = 2;
    constexpr unsigned int height = 3;

    const armnn::TensorInfo tensorInfo({ height, width }, armnn::DataType::Float32);
    auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
        -10.0f, -5.0f,
        0.0f, 5.0f,
        10.0f, 10.0f
    }));

    LayerTestResult<float, 2> ret(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);

    armnn::FakeQuantizationQueueDescriptor data;
    armnn::WorkloadInfo info;

    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
    float min = -10.f;
    float max = 10.f;

    data.m_Parameters.m_Min = min;
    data.m_Parameters.m_Max = max;
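    // With min = -10 and max = 10 the layer maps the input range onto the full
    // quantised range: -10 -> 0, 0 -> 128 and 10 -> 255, as the expected output
    // at the end of this test shows.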

    armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
    armnn::FakeQuantizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
        0.0f, 63.0f,
        128.0f, 191.0f,
        255.0f, 255.0f
    }));
    return ret;
}

namespace
{

LayerTestResult<float, 4> L2NormalizationTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                  const armnn::TensorShape& inputOutputTensorShape,
                                                  const std::vector<float>& inputValues,
                                                  const std::vector<float>& expectedOutputValues,
                                                  const armnn::DataLayoutIndexed& layout)
{
    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);

    // At this point, permute the input data if the test requires NHWC layout.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
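    // The mapping { 0, 3, 1, 2 } sends each source dimension to the given destination
    // index (N->0, C->3, H->1, W->2), rearranging NCHW-ordered data into NHWC order.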
    std::vector<float> inputData = inputValues;
    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
        inputData = tmp;
    }

    auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputData));

    LayerTestResult<float, 4> result(outputTensorInfo);
    std::vector<float> expectedOutputData = expectedOutputValues;
    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(expectedOutputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data());
        expectedOutputData = tmp;
    }
    result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(expectedOutputData));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = layout.GetDataLayout();
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workloadFactory.Acquire();
    workload->Execute();
    workloadFactory.Release();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

float CalcInvL2Norm(std::initializer_list<float> elements)
{
    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
                                            [](float acc, float element) { return acc + element * element; });
    return 1.0f / sqrtf(reduction);
}
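
// For example, CalcInvL2Norm({ 3.0f, 4.0f }) = 1 / sqrt(9 + 16) = 0.2f; scaling a
// vector by this factor gives it unit L2 norm.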

} // anonymous namespace

template<typename T>
LayerTestResult<T, 2> Pad2dTestCommon(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
{
    const armnn::TensorShape inputShape{ 3, 3 };
    const armnn::TensorShape outputShape{ 7, 7 };

    const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
    const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());

    std::vector<T> inputValues(
        QuantizedVector<T>(qScale, qOffset,
        {
            // Height (3) x Width (3)
            4, 8, 6,
            7, 4, 4,
            3, 2, 4
        }));

    std::vector<T> expectedOutputValues(
        QuantizedVector<T>(qScale, qOffset,
        {
            0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0,
            0, 0, 4, 8, 6, 0, 0,
            0, 0, 7, 4, 4, 0, 0,
            0, 0, 3, 2, 4, 0, 0,
            0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0
        }));

    auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 2> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    std::vector<std::pair<unsigned int, unsigned int>> PadList;
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
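    // Each PadList entry holds (padding before, padding after) for one dimension, so
    // each output dimension is 2 + 3 + 2 = 7, matching outputShape{ 7, 7 }.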

    descriptor.m_Parameters.m_PadList = PadList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}

template <typename T>
LayerTestResult<T, 3> Pad3dTestCommon(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
{
    const armnn::TensorShape inputShape{ 2, 2, 2 };
    const armnn::TensorShape outputShape{ 3, 5, 6 };

    const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
    const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());

    std::vector<T> inputValues(
        QuantizedVector<T>(qScale, qOffset,
        {
            // Channel 0, Height (2) x Width (2)
            0, 4,
            2, 5,

            // Channel 1, Height (2) x Width (2)
            6, 1,
            5, 2
        }));

    std::vector<T> expectedOutputValues(
        QuantizedVector<T>(qScale, qOffset,
        {
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 4, 0, 0,
            0, 0, 2, 5, 0, 0,
            0, 0, 0, 0, 0, 0,

            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 6, 1, 0, 0,
            0, 0, 5, 2, 0, 0,
            0, 0, 0, 0, 0, 0,

            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        }));

    auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 3> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    std::vector<std::pair<unsigned int, unsigned int>> PadList;
    PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));

    descriptor.m_Parameters.m_PadList = PadList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());

    return result;
}

template <typename T>
LayerTestResult<T, 4> Pad4dTestCommon(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
{
    const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
    const armnn::TensorShape outputShape{ 4, 5, 7, 4 };

    const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
    const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());

    std::vector<T> inputValues(
        QuantizedVector<T>(qScale, qOffset,
        {
            // Batch 0, Channel 0, Height (3) x Width (2)
            0, 1,
            2, 3,
            4, 5,

            // Batch 0, Channel 1, Height (3) x Width (2)
            6, 7,
            8, 9,
            10, 11,

            // Batch 1, Channel 0, Height (3) x Width (2)
            12, 13,
            14, 15,
            16, 17,

            // Batch 1, Channel 1, Height (3) x Width (2)
            18, 19,
            20, 21,
            22, 23
        }));

    std::vector<T> expectedOutputValues(
        QuantizedVector<T>(qScale, qOffset,
        {
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 1, 0,
            0, 2, 3, 0,
            0, 4, 5, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 6, 7, 0,
            0, 8, 9, 0,
            0, 10, 11, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 12, 13, 0,
            0, 14, 15, 0,
            0, 16, 17, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 18, 19, 0,
            0, 20, 21, 0,
            0, 22, 23, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,

            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0
        }));

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    std::vector<std::pair<unsigned int, unsigned int>> PadList;
    PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
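    // With (before, after) padding of (1,1), (2,1), (3,1) and (1,1) on the four
    // dimensions, the output shape is { 1+2+1, 2+2+1, 3+3+1, 1+2+1 } = { 4, 5, 7, 4 }.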

    descriptor.m_Parameters.m_PadList = PadList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

LayerTestResult<uint8_t, 2> PadUint82dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Pad2dTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<uint8_t, 3> PadUint83dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Pad3dTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> PadUint84dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Pad4dTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<float, 2> PadFloat322dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Pad2dTestCommon<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<float, 3> PadFloat323dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Pad3dTestCommon<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<float, 4> PadFloat324dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Pad4dTestCommon<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<float, 4> L2Normalization1dTest(armnn::IWorkloadFactory& workloadFactory,
                                                const armnn::DataLayoutIndexed& layout)
{
    // Width: 1
    // Height: 1
    // Channels: 10
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 10;
    unsigned int height = 1;
    unsigned int width = 1;

    const armnn::TensorShape inputOutputShape = GetTestTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        1.0f,

        // Batch 0, Channel 1, Height (1) x Width (1)
        2.0f,

        // Batch 0, Channel 2, Height (1) x Width (1)
        3.0f,

        // Batch 0, Channel 3, Height (1) x Width (1)
        4.0f,

        // Batch 0, Channel 4, Height (1) x Width (1)
        5.0f,

        // Batch 0, Channel 5, Height (1) x Width (1)
        6.0f,

        // Batch 0, Channel 6, Height (1) x Width (1)
        7.0f,

        // Batch 0, Channel 7, Height (1) x Width (1)
        8.0f,

        // Batch 0, Channel 8, Height (1) x Width (1)
        9.0f,

        // Batch 0, Channel 9, Height (1) x Width (1)
        10.0f
    };
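    // The vector (1, 2, ..., 10) has L2 norm sqrt(1 + 4 + ... + 100) = sqrt(385),
    // so the inverse norm is 1 / sqrt(385), approximately 0.050964719.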
    const float approxInvL2Norm = 0.050964719f;
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0-9, Height (1) x Width (1)
        1.0f * approxInvL2Norm,
        2.0f * approxInvL2Norm,
        3.0f * approxInvL2Norm,
        4.0f * approxInvL2Norm,
        5.0f * approxInvL2Norm,
        6.0f * approxInvL2Norm,
        7.0f * approxInvL2Norm,
        8.0f * approxInvL2Norm,
        9.0f * approxInvL2Norm,
        10.0f * approxInvL2Norm
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, layout);
}

LayerTestResult<float, 4> L2Normalization2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                const armnn::DataLayoutIndexed& layout)
{
    // Width: 5
    // Height: 1
    // Channels: 2
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 2;
    unsigned int height = 1;
    unsigned int width = 5;

    const armnn::TensorShape inputOutputShape = GetTestTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (5)
        1.0f, 3.0f, 5.0f, 7.0f, 9.0f,

        // Batch 0, Channel 1, Height (1) x Width (5)
        2.0f, 4.0f, 6.0f, 8.0f, 10.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (5)
        1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),

        // Batch 0, Channel 1, Height (1) x Width (5)
        2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, layout);
}

LayerTestResult<float, 4> L2Normalization3dTest(armnn::IWorkloadFactory& workloadFactory,
                                                const armnn::DataLayoutIndexed& layout)
{
    // Width: 3
    // Height: 4
    // Channels: 2
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 2;
    unsigned int height = 4;
    unsigned int width = 3;

    const armnn::TensorShape inputOutputShape = GetTestTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f, 21.0f, 150.0f,
        149.0f, 32.0f, 179.0f,
        15.0f, 227.0f, 141.0f,
        147.0f, 199.0f, 220.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f, 140.0f, 73.0f,
        211.0f, 212.0f, 89.0f,
        24.0f, 138.0f, 188.0f,
        162.0f, 12.0f, 161.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
        150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
        179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
        15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
        73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
        89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
        24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
        161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, layout);
}

LayerTestResult<float, 4> L2Normalization4dTest(armnn::IWorkloadFactory& workloadFactory,
                                                const armnn::DataLayoutIndexed& layout)
{
    // Width: 3
    // Height: 4
    // Channels: 3
    // BatchSize: 2
    unsigned int numberOfBatches = 2;
    unsigned int numberOfChannels = 3;
    unsigned int height = 4;
    unsigned int width = 3;

    const armnn::TensorShape inputOutputShape = GetTestTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2, Height (4) x Width (3)
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0, Height (4) x Width (3)
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2, Height (4) x Width (3)
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 0, Channel 2, Height (4) x Width (3)
        56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 1, Channel 0, Height (4) x Width (3)
        67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),

        // Batch 1, Channel 2, Height (4) x Width (3)
        97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
                                   inputValues, expectedOutputValues, layout);
}

template <typename T>
LayerTestResult<T, 4> ConstantTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                       float qScale,
                                       int32_t qOffset)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::GetDataType<T>());

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f,
    })));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;
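    // A Constant workload has no inputs; it simply writes m_LayerOutput to its output
    // tensor, which is why outputExpected above is the input data unchanged.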

    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);

    outputHandle->Allocate();

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ConstantTest(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> ConstantTestUint8(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
}

4305LayerTestResult<uint8_t, 3> MergerUint8Test(armnn::IWorkloadFactory& workloadFactory)
4306{
surmeh013537c2c2018-05-18 16:31:43 +01004307 unsigned int outputWidth = 3;
telsoa014fcda012018-03-09 14:13:49 +00004308 unsigned int outputHeight = 6;
4309 unsigned int outputChannels = 3;
4310
surmeh013537c2c2018-05-18 16:31:43 +01004311 unsigned int inputWidth1 = 3;
4312 unsigned int inputHeight1 = 6;
4313 unsigned int inputChannels1 = 2;
telsoa014fcda012018-03-09 14:13:49 +00004314
surmeh013537c2c2018-05-18 16:31:43 +01004315 unsigned int inputWidth2 = 3;
4316 unsigned int inputHeight2 = 6;
4317 unsigned int inputChannels2 = 1;
telsoa014fcda012018-03-09 14:13:49 +00004318
telsoa01c577f2c2018-08-31 09:22:23 +01004319 // Defines the tensor descriptors.
telsoa014fcda012018-03-09 14:13:49 +00004320 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
4321 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
4322 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
telsoa014fcda012018-03-09 14:13:49 +00004323
telsoa01c577f2c2018-08-31 09:22:23 +01004324 // Arbitrary scale and offsets. They don't really matter as the merger operator doesn't dequantize/quantize them.
telsoa014fcda012018-03-09 14:13:49 +00004325 const float scale = 0.13497836f;
4326 const int32_t offset = -7;
4327
4328 outputTensorInfo.SetQuantizationScale(scale);
4329 outputTensorInfo.SetQuantizationOffset(offset);
4330 inputTensorInfo1.SetQuantizationScale(scale);
4331 inputTensorInfo1.SetQuantizationOffset(offset);
4332 inputTensorInfo2.SetQuantizationScale(scale);
4333 inputTensorInfo2.SetQuantizationOffset(offset);
telsoa014fcda012018-03-09 14:13:49 +00004334
4335 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
4336
4337 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
surmeh013537c2c2018-05-18 16:31:43 +01004338 {
4339 1, 2, 3,
4340 4, 5, 6,
4341 7, 8, 9,
4342 10, 11, 12,
4343 13, 14, 15,
4344 16, 17, 18,
telsoa014fcda012018-03-09 14:13:49 +00004345
surmeh013537c2c2018-05-18 16:31:43 +01004346 19, 20, 21,
4347 22, 23, 24,
4348 25, 26, 27,
4349 28, 29, 30,
4350 31, 32, 33,
4351 34, 35, 36,
telsoa014fcda012018-03-09 14:13:49 +00004352
surmeh013537c2c2018-05-18 16:31:43 +01004353 37, 38, 39,
4354 40, 41, 42,
4355 43, 44, 45,
4356 46, 47, 48,
4357 49, 50, 51,
4358 52, 53, 54,
4359 })
telsoa014fcda012018-03-09 14:13:49 +00004360 );
4361
telsoa014fcda012018-03-09 14:13:49 +00004362 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
4363 {
surmeh013537c2c2018-05-18 16:31:43 +01004364 1, 2, 3,
4365 4, 5, 6,
4366 7, 8, 9,
4367 10, 11, 12,
4368 13, 14, 15,
4369 16, 17, 18,
telsoa014fcda012018-03-09 14:13:49 +00004370
surmeh013537c2c2018-05-18 16:31:43 +01004371 19, 20, 21,
4372 22, 23, 24,
4373 25, 26, 27,
4374 28, 29, 30,
4375 31, 32, 33,
4376 34, 35, 36,
telsoa014fcda012018-03-09 14:13:49 +00004377 })
4378 );
4379
4380 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
4381 {
surmeh013537c2c2018-05-18 16:31:43 +01004382 37, 38, 39,
4383 40, 41, 42,
telsoa014fcda012018-03-09 14:13:49 +00004384 43, 44, 45,
surmeh013537c2c2018-05-18 16:31:43 +01004385 46, 47, 48,
4386 49, 50, 51,
4387 52, 53, 54,
telsoa014fcda012018-03-09 14:13:49 +00004388 })
4389 );
4390
telsoa01c577f2c2018-08-31 09:22:23 +01004391 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
telsoa014fcda012018-03-09 14:13:49 +00004392 armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);
4393
telsoa01c577f2c2018-08-31 09:22:23 +01004394 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
telsoa014fcda012018-03-09 14:13:49 +00004395 armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
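    // Worked example: the views are laid out along the channel axis of the
    // 3-channel output, so input1 (2 channels) starts at channel 0 and
    // input2 (1 channel) starts at channel 2 - hence wOrigin2 = { 2, 0, 0 }.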

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

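    // Where the backend supports sub-tensors, each input handle is created as a
    // view into the output tensor's memory at its window origin, so the merger
    // needs no extra copy; otherwise standalone input tensors are allocated.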
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<uint8_t, 4> AdditionUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 1;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    const float scale = 7.0f;
    const int32_t offset = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    const unsigned int shape[] = { batchSize, channels, height, width };
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);

    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

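    // Worked example of the arithmetic (scale 7, offset 3): a quantized value q
    // dequantizes to 7 * (q - 3), so 63 -> 7 * (63 - 3) = 420. The sums are
    // requantized with the same parameters, so any sum above 7 * (255 - 3) = 1764
    // saturates to 255 - e.g. 2065 and 2212 in the expected output below.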
    // See dequantized values to the right.
    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
    {
        63, 35, 77, 70, 56, 112,   //  420, 224,  518,  469,  371, 763
        203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
    }));

    // See dequantized values to the right.
    auto input2 = MakeTensor<uint8_t, 4>(inputTensorInfo2, std::vector<uint8_t>(
    {
        21, 7, 175, 231, 175, 210, // 126,   28, 1204, 1596, 1204, 1449
        126, 161, 63, 21, 105, 126 // 861, 1106,  420,  126,  714,  861
    }));

    // See dequantized values to the right.
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>(
    {
        81, 39, 249, 255, 228, 255,   // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
        255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

namespace
{
LayerTestResult<uint8_t, 4> MultiplicationUint8TestHelper(armnn::IWorkloadFactory& workloadFactory,
                                                          const unsigned int shape0[4],
                                                          const std::vector<uint8_t>& values0,
                                                          float scale0,
                                                          int32_t offset0,
                                                          const unsigned int shape1[4],
                                                          const std::vector<uint8_t>& values1,
                                                          float scale1,
                                                          int32_t offset1,
                                                          const unsigned int outShape[4],
                                                          const std::vector<uint8_t>& outValues,
                                                          float outScale,
                                                          int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1(4, shape1, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo outputTensorInfo(4, outShape, armnn::DataType::QuantisedAsymm8);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<uint8_t, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, values1);

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<uint8_t, 4> MultiplicationUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 1;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;
    const unsigned int shape[] = { batchSize, channels, height, width };

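    // Worked example: input0 uses scale 4 / offset 1 and input1 scale 3 / offset -2,
    // so 62 -> 4 * (62 - 1) = 244 and 126 -> 3 * (126 + 2) = 384. The product
    // 244 * 384 = 93696 is then requantized with the deliberately large output
    // scale: 93696 / 1366.255 - 5 = 63.6, which rounds to 64.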
    // See dequantized values to the right.
    std::vector<uint8_t> input0({
        62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
        188, 20, 73, 31, 23, 31  // 748, 76, 288, 120, 88, 120
    });

    // See dequantized values to the right.
    std::vector<uint8_t> input1({
        126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
        48, 115, 151, 79, 78, 97      // 150, 351, 459, 243, 240, 297
    });

    // See dequantized values to the right.
    std::vector<uint8_t> output(
    {
        64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
        77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
    });

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape,
                                         input0,
                                         4.0f,
                                         1,
                                         shape,
                                         input1,
                                         3.0f,
                                         -2,
                                         shape,
                                         output,
                                         1366.255f, // Scale/offset chosen to have output values out of range.
                                         -5);
}

LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<uint8_t> input0({
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12
    });

    std::vector<uint8_t> input1({ 2 });

    std::vector<uint8_t> output({
        2, 4, 6, 8, 10, 12,
        14, 16, 18, 20, 22, 24
    });

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape0,
                                         input0,
                                         1.0f,
                                         0,
                                         shape1,
                                         input1,
                                         1.0f,
                                         0,
                                         shape0,
                                         output,
                                         1.0f,
                                         0);
}

LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 3 };

    std::vector<uint8_t> input0({
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12
    });

    std::vector<uint8_t> input1({ 1, 2, 3 });

    std::vector<uint8_t> output({
        1, 4, 9, 4, 10, 18,
        7, 16, 27, 10, 22, 36
    });

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape0,
                                         input0,
                                         1.0f,
                                         0,
                                         shape1,
                                         input1,
                                         1.0f,
                                         0,
                                         shape0,
                                         output,
                                         1.0f,
                                         0);
}

namespace
{
template <typename T>
LayerTestResult<T, 4> SubtractionTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                            const unsigned int shape0[4],
                                            const std::vector<T>& values0,
                                            float scale0,
                                            int32_t offset0,
                                            const unsigned int shape1[4],
                                            const std::vector<T>& values1,
                                            float scale1,
                                            int32_t offset1,
                                            const unsigned int outShape[4],
                                            const std::vector<T>& outValues,
                                            float outScale,
                                            int32_t outOffset)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<uint8_t, 4> SubtractionUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 2 };

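    // Worked example: input0 uses scale 0.5 / offset 2, so 10 -> 0.5 * (10 - 2) = 4
    // and 12 -> 5; input1 and the output use scale 1 / offset 0, giving
    // 4 - 1 = 3, 5 - 2 = 3, 6 - 1 = 5 and 7 - 2 = 5 as the expected values.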
    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 1, 2, 1, 2 });
    std::vector<uint8_t> output({ 3, 3, 5, 5 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 0.5f, 2,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 2 });
    std::vector<uint8_t> output({ 5, 6, 7, 8 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 0.5f, 2,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 3);
}

LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 1 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 2, 1 });
    std::vector<uint8_t> output({ 8, 11, 12, 15 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 2 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 1, -1, 0, 2 });
    std::vector<float> output({ 0, 3, 3, 2 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 10 });
    std::vector<float> output({ -9, -8, -7, -6 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 2 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 10, -5 });
    std::vector<float> output({ -9, 7, -7, 9 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.1567f);
    inputTensorInfo.SetQuantizationOffset(1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.1567f);
    outputTensorInfo.SetQuantizationOffset(1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 255,
        200, 250
    }));

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
    // the centre).
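    // Worked example: the scale factors are inputWidth / outputWidth = 2 and
    // inputHeight / outputHeight = 2, so output texel (0,0) projects exactly onto
    // input (0,0) and the interpolation weights for the other texels are all zero.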
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(3.141592f);
    inputTensorInfo.SetQuantizationOffset(3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(3.141592f);
    outputTensorInfo.SetQuantizationOffset(3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3,
        3, 5
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 2;
    constexpr unsigned int outputHeight = 1;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, // 3.0, 4.5, 6.0
        5, 8, 13 // 9.0, 13.5, 21.0
    }));

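    // Worked example: the width scale factor is 3 / 2 = 1.5, so output x = 1
    // projects onto input x = 1.5, halfway between 4.5 and 6.0, giving 5.25;
    // output x = 0 projects onto input x = 0 and keeps 3.0 unchanged.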
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3 // 3.0, 5.25
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 3;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 5;
    constexpr unsigned int outputHeight = 3;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.010765f);
    inputTensorInfo.SetQuantizationOffset(7);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.010132f);
    outputTensorInfo.SetQuantizationOffset(-18);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        24, 228,  // 0.183005, 2.379065,
        105, 128, // 1.05497, 1.302565
        230, 71   // 2.400595, 0.68896
    }));

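    // Worked example: the width scale factor is 2 / 5 = 0.4, so output x = 1
    // projects onto input x = 0.4: 0.183005 + 0.4 * (2.379065 - 0.183005) = 1.061429.
    // Requantizing with the output parameters gives 1.061429 / 0.010132 - 18 = 86.8,
    // which rounds to the expected 87.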
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        0, 87, 173, 217, 217,  // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
        86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
        219, 151, 84, 50, 50   // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> BatchNormTest(armnn::IWorkloadFactory& workloadFactory)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

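    // The per-channel transform is the usual batch-norm affine map,
    // out = gamma * (x - mean) / sqrt(variance + epsilon) + beta, with the
    // constants supplied inside BatchNormTestImpl; from the values above,
    // channel 0 passes through unchanged and channel 1 maps as x -> (x + 8) / 3.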
    return BatchNormTestImpl<float>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
                                    0.f, 0, armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> BatchNormNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<float>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
                                    0.f, 0, armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> BatchNormUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<uint8_t>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
                                      1.f/20.f, 50, armnn::DataLayout::NCHW);
}

LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<uint8_t>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
                                      1.f/20.f, 50, armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> ConstantUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<uint8_t>(workloadFactory, 2e-6f, 1);
}

LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation1dTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim2TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim2DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
                                                                 bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                                        bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<uint8_t>(workloadFactory, forceNoPadding, 3.0f, -5);
}

LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory& workloadFactory,
                                                                 bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<float>(workloadFactory, forceNoPadding);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                                        bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<uint8_t>(workloadFactory, forceNoPadding, 0.1f, 128);
}

LayerTestResult<float, 4> SimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                 const armnn::DataLayoutIndexed& dataLayout)
{
    return SimpleMaxPooling2dTestCommon<float>(workloadFactory, dataLayout);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                        const armnn::DataLayoutIndexed& dataLayout)
{
    return SimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, dataLayout);
}

LayerTestResult<float, 4> SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                     const armnn::DataLayoutIndexed& dataLayout)
{
    return SimpleAveragePooling2dTestCommon<float>(workloadFactory, dataLayout);
}

LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            const armnn::DataLayoutIndexed& dataLayout)
{
    return SimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory, dataLayout, 0.5, -1);
}

LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
                                                                            bool forceNoPadding)
{
    return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
}

LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return LargeTensorsAveragePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return LargeTensorsAveragePooling2dTestCommon<uint8_t>(workloadFactory, 0.5, -1);
}

LayerTestResult<float, 4> SimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                const armnn::DataLayoutIndexed& dataLayout)
{
    return SimpleL2Pooling2dTestCommon<float>(workloadFactory, dataLayout);
}

LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                       const armnn::DataLayoutIndexed& dataLayout)
{
    return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, dataLayout);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride1TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride1TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride4TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride4TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize7Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize7TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize7TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize9Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize9TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize9TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AsymmetricNonSquarePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AsymmetricNonSquarePooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> ComparePooling2dTest(armnn::IWorkloadFactory& workloadFactory,
                                               armnn::IWorkloadFactory& refWorkloadFactory,
                                               armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<float>(workloadFactory, refWorkloadFactory, poolingType);
}

LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                      armnn::IWorkloadFactory& refWorkloadFactory,
                                                      armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<uint8_t>(workloadFactory, refWorkloadFactory, poolingType, 0.1f, 128);
}

LayerTestResult<float, 2> FullyConnectedLargeTest(armnn::IWorkloadFactory& workloadFactory,
                                                  bool transposeWeights)
{
    return FullyConnectedLargeTestCommon<float>(workloadFactory, transposeWeights);
}

LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, 1.0f, -5);
}

LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<uint8_t>(workloadFactory, 1.0f, -5);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> SimplePermuteFloat32Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimplePermuteFloat32TestCommon(workloadFactory);
}

LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimplePermuteUint8TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet1TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet2TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet3TestCommon(workloadFactory);
}

namespace
{

template <typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> MeanTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                             const unsigned int* inputShape,
                                             const std::vector<T>& inputData,
                                             const std::vector<unsigned int>& axis,
                                             bool keepDims,
                                             const unsigned int* outputShape,
                                             const std::vector<T>& outputData,
                                             float scale = 1.0f,
                                             int32_t offset = 0)
{
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MeanQueueDescriptor data;
    data.m_Parameters.m_Axis = axis;
    data.m_Parameters.m_KeepDims = keepDims;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workload->Execute();

    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());

    return result;
}

} // anonymous namespace

LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 3, 2 };
    const unsigned int outputShape[] = { 1 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2 });

    return MeanTestHelper<uint8_t, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
}

LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 2 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2, 2 });

    return MeanTestHelper<uint8_t, 4, 3>(workloadFactory, inputShape, input, { 2 }, false, outputShape, output);
}

LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2, 2 });

    return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, { 2 }, true, outputShape, output);
}

LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 1, 3, 1, 1 };

    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
    std::vector<uint8_t> output({ 1, 3, 5 });

    return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, { 0, 3 }, true, outputShape, output);
}

LayerTestResult<uint8_t, 1> MeanVtsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 2 };

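    // Worked example: reducing axes { 0, 1 } averages the 12 values at each
    // position of the last dimension: (1 + 3 + ... + 23) / 12 = 12 and
    // (2 + 4 + ... + 24) / 12 = 13. Input and output share scale 0.8 / offset 5,
    // so the expected quantized values are simply the means of the raw data.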
    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                                 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 });
    std::vector<uint8_t> output({ 12, 13 });

    return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, inputShape, input, { 0, 1 }, false, outputShape,
                                         output, 0.8f, 5);
}

LayerTestResult<float, 1> MeanFloatSimpleTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 3, 2 };
    const unsigned int outputShape[] = { 1 };

    std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
    std::vector<float> output({ 2.0f });

    return MeanTestHelper<float, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
}

LayerTestResult<float, 3> MeanFloatSimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 3, 1, 2 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
    std::vector<float> output({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });

    return MeanTestHelper<float, 4, 3>(workloadFactory, inputShape, input, { 0 }, false, outputShape, output);
}

LayerTestResult<float, 4> MeanFloatKeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };

    std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
    std::vector<float> output({ 2.0f, 2.0f });

    return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, { 2 }, true, outputShape, output);
}

LayerTestResult<float, 4> MeanFloatMultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 1, 3, 1, 1 };

Matteo Martincigh28dcab62018-10-19 16:40:03 +01005728 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
5729 std::vector<float> output({ 1.5f, 3.5f, 5.5f });
narpra011e4c31d2018-09-28 11:07:51 +01005730
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005731 return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01005732}
5733
5734LayerTestResult<float, 1> MeanVtsFloat1Test(armnn::IWorkloadFactory& workloadFactory)
5735{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005736 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01005737 const unsigned int outputShape[] = { 2 };
5738
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005739 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
5740 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
5741 std::vector<float> output({ 12.0f, 13.0f });
narpra011e4c31d2018-09-28 11:07:51 +01005742
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005743 return MeanTestHelper<float, 3, 1>(workloadFactory, inputShape, input, { 0, 1 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01005744}
5745
5746LayerTestResult<float, 3> MeanVtsFloat2Test(armnn::IWorkloadFactory& workloadFactory)
5747{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005748 const unsigned int inputShape[] = { 4, 3, 2 };
5749 const unsigned int outputShape[] = { 1, 3, 1 };
narpra011e4c31d2018-09-28 11:07:51 +01005750
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005751 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
5752 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
5753 std::vector<float> output({ 10.5f, 12.5f, 14.5f });
narpra011e4c31d2018-09-28 11:07:51 +01005754
Matteo Martincigh28dcab62018-10-19 16:40:03 +01005755 return MeanTestHelper<float, 3, 3>(workloadFactory, inputShape, input, { 0, 2 }, true, outputShape, output);
5756}
5757
5758LayerTestResult<float, 3> MeanVtsFloat3Test(armnn::IWorkloadFactory& workloadFactory)
5759{
5760 const unsigned int inputShape[] = { 1, 2, 2, 1 };
5761 const unsigned int outputShape[] = { 1, 2, 1 };
5762
5763 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f });
5764 std::vector<float> output({ 1.5f, 3.5f });
5765
5766 return MeanTestHelper<float, 4, 3>(workloadFactory, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01005767}
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01005768
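// Chains two workloads: the max pooling output handle is bound directly as the first input
// of the addition workload, exercising ITensorHandle reuse between layers.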
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Create the initial tensor:
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<float>());
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<float>());

    boost::multi_array<float, 4> poolingInput = MakeTensor<float, 4>(poolingInputTensorInfo,
                                                                     {1, 2, 3,
                                                                      4, 5, 6,
                                                                      7, 8, 9
                                                                     });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
            workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
            workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool with poolSize = 1x1 and stride = 2x2:
    // Result =
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the max pooling workload.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    // Scratch buffer used to round-trip the pooling result through host memory.
    auto shape(GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);

    // Create an addition with another tensor of the same size. This tensor is the result of
    // applying a Conv2d with a kernel of ones(2) and stride 1x1 to the initial tensor:
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<float>());
    armnn::TensorInfo addOutputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<float>());

    boost::multi_array<float, 4> addInput = MakeTensor<float, 4>(addInputTensorInfo,
                                                                 {12, 16,
                                                                  24, 28
                                                                 });

    // Expected output tensor after MaxPool and Addition.
    LayerTestResult<float, 4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
            {
                13, 19,
                31, 37
            }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor.
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);

    // Run the pooling first so that the round trip below copies the actual pooled values
    // rather than uninitialised output memory.
    workload->Execute();

    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());
    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}

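// The SpaceToBatchNd tests below are thin wrappers instantiating a shared templated
// implementation for each data type (float and uint8_t), with separate NHWC layout variants.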
LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SpaceToBatchNdSimpleTest<float>(workloadFactory);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SpaceToBatchNdMultiChannelsTest<float>(workloadFactory);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SpaceToBatchNdMultiBlockTest<float>(workloadFactory);
}

LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SpaceToBatchNdPaddingTest<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SpaceToBatchNdSimpleTest<uint8_t>(workloadFactory);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SpaceToBatchNdMultiChannelsTest<uint8_t>(workloadFactory);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SpaceToBatchNdMultiBlockTest<uint8_t>(workloadFactory);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SpaceToBatchNdPaddingTest<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SpaceToBatchNdSimpleNHWCTest<float>(workloadFactory);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<float>(workloadFactory);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SpaceToBatchNdMultiBlockNHWCTest<float>(workloadFactory);
}

LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SpaceToBatchNdPaddingNHWCTest<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SpaceToBatchNdSimpleNHWCTest<uint8_t>(workloadFactory);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<uint8_t>(workloadFactory);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SpaceToBatchNdMultiBlockNHWCTest<uint8_t>(workloadFactory);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SpaceToBatchNdPaddingNHWCTest<uint8_t>(workloadFactory);
}

namespace {

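// Shared helper: builds and runs a single BatchToSpaceNd workload from raw shapes and data,
// and returns the actual output alongside the expected one. The scale/offset parameters set
// the quantization info on both tensor infos (only meaningful for quantized instantiations).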
template<typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> BatchToSpaceNdHelper(armnn::IWorkloadFactory& workloadFactory,
                                                   const armnn::DataLayout& dataLayout,
                                                   const unsigned int* inputShape,
                                                   const std::vector<T>& inputData,
                                                   const std::vector<unsigned int>& blockShape,
                                                   const std::vector<std::pair<unsigned int, unsigned int>>& crops,
                                                   const unsigned int* outputShape,
                                                   const std::vector<T>& outputData,
                                                   float scale = 1.0f,
                                                   int32_t offset = 0)
{
    // Pick the data type from the template parameter: uint8_t maps to the quantized type.
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchToSpaceNdQueueDescriptor data;
    data.m_Parameters.m_DataLayout = dataLayout;
    data.m_Parameters.m_BlockShape = blockShape;
    data.m_Parameters.m_Crops = crops;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchToSpaceNd(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

} // anonymous namespace

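// For block shape { bH, bW } and zero crops, NHWC BatchToSpaceNd gathers
//   output[0][h][w][c] = input[(h % bH) * bW + (w % bW)][h / bH][w / bW][c],
// so the four 2x2 input batches below interleave into a single 4x4 plane.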
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test1(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = {4, 2, 2, 1};
    const unsigned int outputShape[] = {1, 4, 4, 1};

    std::vector<float> input
    ({
        // Batch 0, Height 0, Width (2) x Channel (1)
        1.0f, 3.0f,
        // Batch 0, Height 1, Width (2) x Channel (1)
        9.0f, 11.0f,

        // Batch 1, Height 0, Width (2) x Channel (1)
        2.0f, 4.0f,
        // Batch 1, Height 1, Width (2) x Channel (1)
        10.0f, 12.0f,

        // Batch 2, Height 0, Width (2) x Channel (1)
        5.0f, 7.0f,
        // Batch 2, Height 1, Width (2) x Channel (1)
        13.0f, 15.0f,

        // Batch 3, Height 0, Width (2) x Channel (1)
        6.0f, 8.0f,
        // Batch 3, Height 1, Width (2) x Channel (1)
        14.0f, 16.0f
    });

    std::vector<float> expectedOutput
    ({
        1.0f, 2.0f, 3.0f, 4.0f,
        5.0f, 6.0f, 7.0f, 8.0f,
        9.0f, 10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f, 16.0f
    });

    std::vector<unsigned int> blockShape {2, 2};
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}

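// Degenerate case: with 1x1 spatial input the gather above reduces to laying the four batch
// values out in row-major order, so the output equals the input sequence.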
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test2(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = {4, 1, 1, 1};
    const unsigned int outputShape[] = {1, 2, 2, 1};

    std::vector<float> input
    ({
        // One value per batch; Height, Width and Channel are all 1.
        1.0f, 2.0f, 3.0f, 4.0f
    });

    std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}

LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test3(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = {4, 1, 1, 3};
    const unsigned int outputShape[] = {1, 2, 2, 3};

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });

    std::vector<float> expectedOutput({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}

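// The NCHW variant applies the same batch-to-spatial gather independently per channel:
//   output[0][c][h][w] = input[(h % bH) * bW + (w % bW)][c][h / bH][w / bW].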
LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test1(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = {4, 3, 1, 1};
    const unsigned int outputShape[] = {1, 3, 2, 2};

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });

    std::vector<float> expectedOutput
    ({
        // Batch 0, Channel 0, Height (2) x Width (2)
        1.0f, 4.0f,
        7.0f, 10.0f,

        // Batch 0, Channel 1, Height (2) x Width (2)
        2.0f, 5.0f,
        8.0f, 11.0f,

        // Batch 0, Channel 2, Height (2) x Width (2)
        3.0f, 6.0f,
        9.0f, 12.0f,
    });

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}