//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "LayerTests.hpp"

#include "test/TensorHelpers.hpp"
#include "TensorCopyUtils.hpp"
#include "Permute.hpp"

#include <boost/test/unit_test.hpp>
#include <boost/assert.hpp>

#include <armnn/LayerSupport.hpp>

#include <backends/CpuTensorHandle.hpp>
#include <backends/WorkloadFactory.hpp>

#include <algorithm>
#include <boost/cast.hpp>

#include "WorkloadTestUtils.hpp"
#include "Conv2dTestImpl.hpp"
#include "BatchNormTestImpl.hpp"
#include "ActivationTestImpl.hpp"
#include "Pooling2dTestImpl.hpp"
#include "ReshapeTestImpl.hpp"
#include "FullyConnectedTestImpl.hpp"
#include "SplitterTestImpl.hpp"
#include "SoftmaxTestImpl.hpp"
#include "NormTestImpl.hpp"
#include "PermuteTestImpl.hpp"
#include "LstmTestImpl.hpp"
#include "ConvertFp16ToFp32TestImpl.hpp"
#include "ConvertFp32ToFp16TestImpl.hpp"

// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
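// Channel 0 is mostly 0.5 with one row of zeros, channel 1 is a single vertical
// line of ones on a zero background, and channel 2 is uniformly -1.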
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});

// 2-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias2({0, 2});

// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
template<typename T>
boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
{
    if(biasEnabled)
    {
        armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, armnn::GetDataType<T>());
        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
        return bias;
    }
    else
    {
        return boost::multi_array<T, 1>();
    }
}
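// Example: GetBias2<float>(true, 0.0f, 0) yields the two bias values {0, 2}. For a
// quantized type T, QuantizedVector is assumed to map each real value v to
// round(v / qScale) + qOffset before the tensor is built.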

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       float qScale,
                                                       int32_t qOffset,
                                                       bool biasEnabled,
                                                       const armnn::DataLayoutIndexed& layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x4 image.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout);
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       float qScale,
                                                       int32_t qOffset,
                                                       bool biasEnabled)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

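    // Here the valid convolution yields width 16 - 3 + 1 = 14 and height 8 - 3 + 1 = 6.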
    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset);
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                           float qScale,
                                                           int32_t qOffset,
                                                           bool biasEnabled,
                                                           armnn::DataLayout dataLayout)
{
    // Use a common single-batch 1-channel 3x4 (HxW) image in NHWC layout.
    armnn::TensorInfo inputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
        {
            1, 5, 2, 3,
            8, 7, 3, 6,
            3, 3, 9, 1
        });

    // Use a single 1-channel 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
        4, 5, 6,
        0, 0, 0,
        3, 2, 1
    });

    // Expected output is 1 batch of a 1-channel 3x4 (HxW) image.
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());

    const std::vector<float> outputData =
    {
        23, 41, 33, 21,
        44, 65, 76, 52,
        82, 85, 79, 42
    };
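    // These values are consistent with a stride-1 cross-correlation using one pixel
    // of zero padding on each side, which keeps the 3x4 input size; e.g. the top-left
    // entry is 8*2 + 7*1 = 23, where only the bottom kernel row overlaps valid pixels.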

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    return SimpleConvolution2dNhwcTestImpl<T>(workloadFactory,
                                              input,
                                              kernel,
                                              boost::multi_array<T, 1>(),
                                              expectedOutput,
                                              dataLayout,
                                              qScale,
                                              qOffset);
}

LayerTestResult<float, 4> SimpleConvolution2d3x5Test(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled,
                                                     const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2d3x5TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, layout);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled,
                                                            const armnn::DataLayoutIndexed& layout)
{
    return SimpleConvolution2d3x5TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled, layout);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3Test(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled)
{
    return SimpleConvolution2d3x3TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(armnn::IWorkloadFactory& workloadFactory,
                                                         bool biasEnabled)
{
    return SimpleConvolution2d3x3NhwcTestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled)
{
    return SimpleConvolution2d3x3TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

template<typename T>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,
            12,22,32,
            13,23,33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,
            -12,-22,
        })));

// Expected output is 1 batch of a 1-channel 6x8 image.
// Manually calculated like this:
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
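// Size check: with unit stride, outDim = inDim + padBefore + padAfter - kernelDim + 1,
// giving height = 3 + 2 + 4 - 2 + 1 = 8 and width = 3 + 1 + 3 - 2 + 1 = 6.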
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            0, 0, 0, 0, 0, 0,
            -242, -594, -934, -372, 0, 0,
            -495, -1190, -1850, -725, 0, 0,
            -538, -1256, -1916, -748, 0, 0,
            -273, -626, -946, -363, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          armnn::DataLayout::NCHW,
                                          1,  // Padding left.
                                          2,  // Padding top.
                                          3,  // Padding right.
                                          4); // Padding bottom.
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                     float qScale,
                                                                     int32_t qOffset)
{
    // Use a single-batch 1-channel 5x5 image as input.
    armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,41,51,
            12,22,32,42,52,
            13,23,33,43,53,
            14,24,34,44,54,
            15,25,35,45,55,
        })));

    // Use 1 batch of a 1-channel 4x4 kernel.
    armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,-31,-41,
            -12,-22,-32,-42,
            -13,-23,-33,-43,
            -14,-24,-34,-44,
        })));

    // Expected output is 1 batch of a 1-channel 5x5 image.
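    // (With padding (left=1, top=1, right=2, bottom=2): 5 + 1 + 2 - 4 + 1 = 5 in each
    // dimension, so the padded convolution preserves the 5x5 size.)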
    armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -7140, -10580, -13940, -9300, -5230,
            -9590, -14120, -18520, -12290, -6860,
            -9980, -14560, -18960, -12560, -7000,
            -7518, -10904, -14144, -9318, -5152,
            -5032, -7256, -9376, -6142, -3368,
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          armnn::DataLayout::NCHW,
                                          1,  // Padding left.
                                          1,  // Padding top.
                                          2,  // Padding right.
                                          2); // Padding bottom.
}

template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                 float qScale,
                                                                 int32_t qOffset,
                                                                 bool biasEnabled)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
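    // With a depth multiplier of 1, each input channel is filtered independently by
    // its own 4x4 kernel slice, so the output keeps the input's 2 channels.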
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the Python TensorFlow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dAsymmetricTestImpl<T>(workloadFactory,
                                                       input,
                                                       kernel,
                                                       GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                                       expectedOutput,
                                                       qScale,
                                                       qOffset,
                                                       1,  // Padding left.
                                                       1,  // Padding top.
                                                       2,  // Padding right.
                                                       2,  // Padding bottom.
                                                       1,  // strideX
                                                       1); // strideY
}

template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                           float qScale,
                                                           int32_t qOffset,
                                                           bool biasEnabled)
{
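    // This mirrors DepthwiseConvolution2dAsymmetricTestCommon with the input, kernel
    // and expected output permuted from NCHW to NHWC, so each short row below
    // interleaves the values of the two channels.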
    armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
             0, 25,
             1, 26,
             2, 27,
             3, 28,
             4, 29,

             5, 30,
             6, 31,
             7, 32,
             8, 33,
             9, 34,

            10, 35,
            11, 36,
            12, 37,
            13, 38,
            14, 39,

            15, 40,
            16, 41,
            17, 42,
            18, 43,
            19, 44,

            20, 45,
            21, 46,
            22, 47,
            23, 48,
            24, 49
        })));

    armnn::TensorInfo kernelTensorInfo({ 1, 4, 4, 2}, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 16,
            31, 15,
            30, 14,
            29, 13,

            28, 12,
            27, 11,
            26, 10,
            25,  9,

            24,  8,
            23,  7,
            22,  6,
            21,  5,

            20,  4,
            19,  3,
            18,  2,
            17,  1
        })));

    armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1550,
            1580, 2284,
            1850, 2362,
            1530, 1955,
            1117, 1428,

            2140, 2910,
            3108, 4206,
            3500, 4342,
            2842, 3528,
            2042, 2536,

            3580, 3390,
            5068, 4886,
            5460, 5022,
            4342, 4068,
            3062, 2916,

            3618, 3566,
            5072, 5056,
            5390, 5182,
            4248, 4133,
            2971, 2922,

            3074, 3100,
            4282, 4352,
            4510, 4452,
            3533, 3517,
            2457, 2465
        })));

    return DepthwiseConvolution2dNhwcTestImpl<T>(workloadFactory,
                                                 input,
                                                 kernel,
                                                 GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                                 expectedOutput,
                                                 qScale,
                                                 qOffset,
                                                 1,  // Padding left.
                                                 1,  // Padding top.
                                                 2,  // Padding right.
                                                 2,  // Padding bottom.
                                                 1,  // strideX
                                                 1); // strideY
}

LayerTestResult<float, 4>
Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleConvolution2dAsymmetricPaddingTestCommon<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<float, 4> DepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled)
{
    return DepthwiseConvolution2dTestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(armnn::IWorkloadFactory& workloadFactory,
                                                              bool biasEnabled)
{
    return DepthwiseConvolution2dNhwcTestCommon<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(armnn::IWorkloadFactory& workloadFactory,
                                                              bool biasEnabled)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(armnn::IWorkloadFactory& workloadFactory,
                                                               bool biasEnabled)
{
    return DepthwiseConvolution2dAsymmetricTestCommon<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled)
{
    return DepthwiseConvolution2dTestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                                     bool biasEnabled)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

LayerTestResult<float, 4> Convolution1dTest(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
{
    return Convolution1dTestImpl<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> Convolution1dUint8Test(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
{
    return Convolution1dTestImpl<uint8_t>(workloadFactory, 0.1f, 128, biasEnabled);
}

LayerTestResult<float,4> CompareConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                  armnn::IWorkloadFactory& refWorkloadFactory)
{
    return CompareConvolution2dTestImpl<float>(workloadFactory, refWorkloadFactory);
}

template<typename T>
LayerTestResult<T,4> CompareDepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                       armnn::IWorkloadFactory& refWorkloadFactory)
{
    return CompareDepthwiseConvolution2dTestImpl<T>(workloadFactory, refWorkloadFactory);
}

template LayerTestResult<float, 4> CompareDepthwiseConvolution2dTest<float>(
    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&);
template LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dTest<uint8_t>(
    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&);

LayerTestResult<float,4> SimpleNormalizationAcrossTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float,4> SimpleNormalizationWithinTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationNhwcTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float,2> SimpleSoftmaxTest(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    return SimpleSoftmaxTestImpl<float>(workloadFactory, beta);
}

LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    return SimpleSoftmaxTestImpl<uint8_t>(workloadFactory, beta);
}

LayerTestResult<float,4> CompareNormalizationTest(armnn::IWorkloadFactory& workloadFactory,
                                                  armnn::IWorkloadFactory& refWorkloadFactory,
                                                  armnn::NormalizationAlgorithmChannel normChannel,
                                                  armnn::NormalizationAlgorithmMethod normMethod)
{
    return CompareNormalizationTestImpl(workloadFactory, refWorkloadFactory, normChannel, normMethod);
}

LayerTestResult<float,2> CompareSoftmaxTest(armnn::IWorkloadFactory& workloadFactory,
                                            armnn::IWorkloadFactory& refWorkloadFactory,
                                            float beta)
{
    return CompareSoftmaxTestImpl<float>(workloadFactory, refWorkloadFactory, beta);
}

LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                   armnn::IWorkloadFactory& refWorkloadFactory,
                                                   float beta)
{
    return CompareSoftmaxTestImpl<uint8_t>(workloadFactory, refWorkloadFactory, beta);
}

std::vector<LayerTestResult<float,3>> SplitterTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SplitterTestCommon<float>(workloadFactory);
}

std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SplitterTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<float, 3> CopyViaSplitterTest(armnn::IWorkloadFactory& workloadFactory)
{
    return CopyViaSplitterTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return CopyViaSplitterTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({ 2, 2 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        { 2., 3., 3., 4. }));

    armnn::TensorInfo outputDesc({ 2, 4 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
         -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
         0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
         -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
         -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
         0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
         -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
         0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
         0.02168f}));
    return LstmLayerFloat32NoCifgWithPeepholeWithProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({2, 2}, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        {2., 3., 3., 4.}));

    armnn::TensorInfo outputDesc({2, 4}, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
         -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float,3> MergerTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float,3> ret(outputTensorInfo);

    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,

            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,
        })
    );

    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
        {
            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; // Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
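    // input1 (2 channels) is placed at channel offset 0 and input2 (1 channel) at
    // channel offset 2, concatenating the two inputs along the channel dimension of
    // the 3-channel output.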

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<float,4> AdditionTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 2;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
        {
            0.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            1.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 1.0f, 2.0f,

            0.0f, 0.0f, 1.0f,
            0.2f, 1.0f, 2.0f,
        }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
        {
            1.0f, 2.0f, 1.0f,
            0.0f, 1.0f, 2.0f,

            1.0f, 2.0f, -2.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 0.0f, -3.0f,

            0.0f, 0.0f, 1.0f,
            0.7f, 1.0f, 5.0f,
        }));

    LayerTestResult<float,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 4.0f, 2.0f,
            0.2f, 2.0f, 4.0f,

            2.0f, 4.0f, -1.0f,
            0.4f, 2.0f, 4.0f,

            0.0f, 4.0f, 2.0f,
            8.4f, 1.0f, -1.0f,

            0.0f, 0.0f, 2.0f,
            0.9f, 2.0f, 7.0f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template <typename T>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                float qScale,
                                                int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
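    // The inputs broadcast against each other: dimensions of size 1 are stretched to
    // match the other operand, so {1,3,2,1} + {1,1,2,3} produces a {1,3,2,3} result.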

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template <typename T>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                        float qScale,
                                                        int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f, 1.0f, 2.0f,
            3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f,
            9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
            6.5f, 7.5f, 8.5f,
            9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<float, 4> AdditionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcastTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcastTestImpl<uint8_t>(workloadFactory, 2.f, 0);
}

LayerTestResult<float, 4> AdditionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcast1ElementTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcast1ElementTestImpl<uint8_t>(workloadFactory, 0.1333333f, 128);
}

LayerTestResult<float,4> CompareAdditionTest(armnn::IWorkloadFactory& workloadFactory,
                                             armnn::IWorkloadFactory& refWorkloadFactory)
{
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
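    // The same random inputs are fed to both factories: ret.output receives the result
    // from the backend under test and ret.outputExpected the reference result, so the
    // caller can compare the two.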

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();
    refWorkloadFactory.Finalize();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}

namespace {
template <typename T>
LayerTestResult<T, 4> DivisionTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                         const unsigned int shape0[4],
                                         const std::vector<T>& values0,
                                         float scale0,
                                         int32_t offset0,
                                         const unsigned int shape1[4],
                                         const std::vector<T>& values1,
                                         float scale1,
                                         int32_t offset1,
                                         const unsigned int outShape[4],
                                         const std::vector<T>& outValues,
                                         float outScale,
                                         int32_t outOffset)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DivisionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<float,4> DivisionByZeroTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<float> input0({
        1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
       -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });

    std::vector<float> input1({
        0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
        0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });

    std::vector<float> output({
        INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
       -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
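    // IEEE-754 semantics: a finite non-zero value divided by ±0 gives ±infinity (the
    // sign follows the operands) and 0/0 gives NaN, which is what these values encode.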

    return DivisionTestHelper<float>(workloadFactory,
                                     shape, input0, 1.0f, 0,
                                     shape, input1, 1.0f, 0,
                                     shape, output, 1.0f, 0);
}
1346
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001347LayerTestResult<float,4> DivisionTest(armnn::IWorkloadFactory& workloadFactory)
1348{
1349 const unsigned int width = 2;
1350 const unsigned int height = 2;
1351 const unsigned int channelCount = 2;
1352 const unsigned int batchSize = 2;
1353
1354 unsigned int shape[] = { batchSize, channelCount, height, width };
1355
1356 std::vector<float> input0({
1357 2, 2, 2, 2, 3, 3, 3, 3,
1358 4, 4, 4, 4, 5, 5, 5, 5 });
1359
1360 std::vector<float> input1({
1361 1, 1, 1, 1, 2, 2, 2, 2,
1362 4, 4, 4, 4, 4, 4, 4, 4 });
1363
1364 std::vector<float> output({
1365 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
1366 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
1367
David Beck5cd01f32018-09-12 16:00:08 +01001368
1369 return DivisionTestHelper<float>(workloadFactory,
1370 shape, input0, 1.0f, 0,
1371 shape, input1, 1.0f, 0,
1372 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001373}
1374
1375LayerTestResult<float, 4> DivisionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
1376{
1377 unsigned int shape0[] = { 1, 2, 2, 2 };
1378 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1379
1380 unsigned int shape1[] = { 1, 1, 1, 1 };
1381 std::vector<float> input1({ 2 });
1382
1383 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1384
David Beck5cd01f32018-09-12 16:00:08 +01001385
1386 return DivisionTestHelper<float>(workloadFactory,
1387 shape0, input0, 1.0f, 0,
1388 shape1, input1, 1.0f, 0,
1389 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001390}
1391
1392LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
1393{
1394 unsigned int shape0[] = { 1, 3, 3, 2 };
1395 std::vector<float> input0({
1396 1, 4, 3, 8, 5, 12,
1397 7, 16, 9, 20, 11, 24,
1398 13, 28, 15, 32, 17, 36});
1399
1400 unsigned int shape1[] = { 1, 1, 1, 2 };
1401 std::vector<float> input1({ 1, 2 });
1402
1403 std::vector<float> output({
1404 1, 2, 3, 4, 5, 6,
1405 7, 8, 9, 10, 11, 12,
1406 13, 14, 15, 16, 17, 18});
1407
David Beck5cd01f32018-09-12 16:00:08 +01001408 return DivisionTestHelper<float>(workloadFactory,
1409 shape0, input0, 1.0f, 0,
1410 shape1, input1, 1.0f, 0,
1411 shape0, output, 1.0f, 0);
1412}
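
// Broadcasting note: a dimension of size 1 in one input is implicitly tiled to match
// the other input, so the { 1, 1, 1, 2 } vector { 1, 2 } above divides every
// width-pair of the { 1, 3, 3, 2 } tensor: 1/1 = 1, 4/2 = 2, 3/1 = 3, 8/2 = 4, and so on.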
1413
1414
1415LayerTestResult<uint8_t,4> DivisionUint8Test(armnn::IWorkloadFactory& workloadFactory)
1416{
1417 const unsigned int width = 2;
1418 const unsigned int height = 2;
1419 const unsigned int channelCount = 2;
1420 const unsigned int batchSize = 2;
1421
1422 unsigned int shape[] = { batchSize, channelCount, height, width };
1423
1424 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
1425 4, 4, 4, 4, 5, 5, 5, 5 });
1426
1427 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
1428 4, 4, 4, 4, 4, 4, 4, 4 });
1429
1430 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
1431 4, 4, 4, 4, 5, 5, 5, 5});
1432
1433
1434 return DivisionTestHelper<uint8_t>(workloadFactory,
1435 shape, input0, 1.0f, 0,
1436 shape, input1, 1.0f, 0,
1437 shape, output, 0.25f, 0);
1438}
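
// A minimal sketch of the affine quantization rule relating the uint8_t vectors
// above to the real values they represent: real = scale * (stored - offset), so
// stored = real / scale + offset. With the output scale of 0.25f used above, the
// real quotient 2.0f (2/1) is stored as 8, 1.5f (3/2) as 6 and 1.25f (5/4) as 5.
// QuantizeForIllustration is a hypothetical helper, not part of the test suite.
inline uint8_t QuantizeForIllustration(float real, float scale, int32_t offset)
{
    // Round to nearest; exact for the values used in these tests.
    return static_cast<uint8_t>(static_cast<int32_t>(real / scale + 0.5f) + offset);
}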
1439
1440LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
1441{
1442 unsigned int shape0[] = { 1, 2, 2, 2 };
1443 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1444
1445 unsigned int shape1[] = { 1, 1, 1, 1 };
1446 std::vector<uint8_t> input1({ 2 });
1447
1448 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1449
1450 return DivisionTestHelper<uint8_t>(workloadFactory,
1451 shape0, input0, 1.0f, 0,
1452 shape1, input1, 1.0f, 0,
1453 shape0, output, 1.0f, 0);
1454}
1455
1456LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
1457{
1458 unsigned int shape0[] = { 1, 3, 3, 2 };
1459 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
1460 7, 16, 9, 20, 11, 24,
1461 13, 28, 15, 32, 17, 36});
1462
1463 unsigned int shape1[] = { 1, 1, 1, 2 };
1464 std::vector<uint8_t> input1({ 1, 2 });
1465
1466 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
1467 7, 8, 9, 10, 11, 12,
1468 13, 14, 15, 16, 17, 18});
1469
1470 return DivisionTestHelper<uint8_t>(workloadFactory,
1471 shape0, input0, 1.0f, 0,
1472 shape1, input1, 1.0f, 0,
1473 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001474}
1475
1476namespace {
surmeh01bceff2f2018-03-29 16:29:27 +01001477LayerTestResult<float,4> MultiplicationTestHelper(armnn::IWorkloadFactory& workloadFactory,
1478 const unsigned int shape0[4],
1479 const std::vector<float> & values0,
1480 const unsigned int shape1[4],
1481 const std::vector<float> & values1,
1482 const unsigned int outShape[4],
1483 const std::vector<float> & outValues)
telsoa014fcda012018-03-09 14:13:49 +00001484{
surmeh01bceff2f2018-03-29 16:29:27 +01001485 const size_t dimensionCount = 4;
1486 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
1487 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
1488 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
telsoa014fcda012018-03-09 14:13:49 +00001489
surmeh01bceff2f2018-03-29 16:29:27 +01001490 auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
1491 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00001492
1493 LayerTestResult<float,4> ret(outputTensorInfo);
1494
1495 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1496 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1497 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1498
1499 armnn::MultiplicationQueueDescriptor data;
1500 armnn::WorkloadInfo info;
1501 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1502 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1503 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1504
1505 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
1506
1507 inputHandle0->Allocate();
1508 inputHandle1->Allocate();
1509 outputHandle->Allocate();
1510
1511 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1512 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1513
surmeh013537c2c2018-05-18 16:31:43 +01001514 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001515 workload->Execute();
1516
1517 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1518
surmeh01bceff2f2018-03-29 16:29:27 +01001519 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00001520 return ret;
1521}
surmeh01bceff2f2018-03-29 16:29:27 +01001522} // anonymous namespace
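
// The helper above follows the pattern used by the layer tests throughout this file:
// 1. describe the tensors (TensorInfo) and create backend tensor handles,
// 2. fill in a queue descriptor and a WorkloadInfo wiring inputs/outputs to handles,
// 3. ask the factory to create the workload,
// 4. allocate the handles and copy the host data in,
// 5. Finalize() the factory, Execute() the workload and copy the result back out.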
1523
1524
1525LayerTestResult<float,4> MultiplicationTest(armnn::IWorkloadFactory& workloadFactory)
1526{
1527 const unsigned int width = 2;
1528 const unsigned int height = 2;
1529 const unsigned int channelCount = 2;
1530 const unsigned int batchSize = 2;
1531
1532 unsigned int shape[] = { batchSize, channelCount, height, width };
1533
1534 std::vector<float> input0({
1535 1, 1, 1, 1, 2, 2, 2, 2,
1536 3, 3, 3, 3, 4, 4, 4, 4 });
1537
1538 std::vector<float> input1({
1539 2, 2, 2, 2, 3, 3, 3, 3,
1540 4, 4, 4, 4, 5, 5, 5, 5 });
1541
1542 std::vector<float> output({
1543 2, 2, 2, 2, 6, 6, 6, 6,
1544 12, 12, 12, 12, 20, 20, 20, 20 });
1545
1546 return MultiplicationTestHelper(workloadFactory,
1547 shape,
1548 input0,
1549 shape,
1550 input1,
1551 shape,
1552 output);
1553}
1554
1555LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
1556{
1557 unsigned int shape0[] = { 1, 2, 2, 2 };
1558 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
1559
1560 unsigned int shape1[] = { 1, 1, 1, 1 };
1561 std::vector<float> input1({ 2 });
1562
1563 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
1564
1565 return MultiplicationTestHelper(workloadFactory,
1566 shape0,
1567 input0,
1568 shape1,
1569 input1,
1570 shape0,
1571 output);
1572}
1573
1574LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
1575{
1576 unsigned int shape0[] = { 1, 3, 3, 2 };
1577 std::vector<float> input0({
1578 1, 2, 3, 4, 5, 6,
1579 7, 8, 9, 10, 11, 12,
1580 13, 14, 15, 16, 17, 18});
1581
1582 unsigned int shape1[] = { 1, 1, 1, 2 };
1583 std::vector<float> input1({ 1, 2 });
1584
1585 std::vector<float> output({
1586 1, 4, 3, 8, 5, 12,
1587 7, 16, 9, 20, 11, 24,
1588 13, 28, 15, 32, 17, 36});
1589
1590 return MultiplicationTestHelper(workloadFactory,
1591 shape0,
1592 input0,
1593 shape1,
1594 input1,
1595 shape0,
1596 output);
1597}
telsoa014fcda012018-03-09 14:13:49 +00001598
1599LayerTestResult<float,4> CompareMultiplicationTest(armnn::IWorkloadFactory& workloadFactory,
1600 armnn::IWorkloadFactory& refWorkloadFactory)
1601{
1602 const unsigned int width = 16;
1603 const unsigned int height = 32;
1604 const unsigned int channelCount = 2;
1605 const unsigned int batchSize = 5;
1606
1607 armnn::TensorInfo inputTensorInfo0;
1608 armnn::TensorInfo inputTensorInfo1;
1609 armnn::TensorInfo outputTensorInfo;
1610
1611 constexpr unsigned int shape[] = { batchSize, channelCount, height, width };
1612
1613 inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1614 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1615 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1616
1617 LayerTestResult<float,4> comparisonResult(outputTensorInfo);
1618
1619 auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
1620 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);
1621
1622 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1623 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1624 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1625
1626 std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
1627 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
1628 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1629
1630 armnn::MultiplicationQueueDescriptor data;
1631 armnn::WorkloadInfo info;
1632 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1633 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1634 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1635
1636 armnn::MultiplicationQueueDescriptor refData = data;
1637 armnn::WorkloadInfo refInfo = info;
1638 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
1639 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
1640 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1641
1642 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
1643 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);
1644
1645 inputHandle0->Allocate();
1646 inputHandle1->Allocate();
1647 outputHandle->Allocate();
1648 inputHandle0Ref->Allocate();
1649 inputHandle1Ref->Allocate();
1650 outputHandleRef->Allocate();
1651
1652 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1653 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1654 CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
1655 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
1656
surmeh013537c2c2018-05-18 16:31:43 +01001657 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001658 workload->Execute();
surmeh013537c2c2018-05-18 16:31:43 +01001659 refWorkloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001660 workloadRef->Execute();
1661
1662 CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
1663 CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
1664
1665 return comparisonResult;
1666}
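
// CompareMultiplicationTest runs the same randomly generated (but deterministically
// seeded) inputs through two workload factories: result.output comes from the backend
// under test and result.outputExpected from the reference backend, letting the caller
// compare the two implementations element by element.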
1667
1668LayerTestResult<float,4> CompareBatchNormTest(armnn::IWorkloadFactory& workloadFactory,
1669 armnn::IWorkloadFactory& refWorkloadFactory)
1670{
1671 const unsigned int width = 2;
1672 const unsigned int height = 3;
1673 const unsigned int channels = 5;
1674 const unsigned int batchSize = 3;
1675
1676 armnn::TensorInfo inputTensorInfo;
1677 armnn::TensorInfo outputTensorInfo;
1678 armnn::TensorInfo tensorInfo;
1679
1680 constexpr unsigned int shape[] = {batchSize, channels, height, width};
1681 constexpr unsigned int tensorShape[] = {channels};
1682
1683 inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1684 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1685 tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);
1686
1687 auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);
1688
1689 auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
1690 auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
1691 auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
1692 auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);
1693
1694 LayerTestResult<float,4> ret(outputTensorInfo);
1695
1696 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
1697 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1698
1699 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
1700 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1701
1702 armnn::BatchNormalizationQueueDescriptor data;
1703 armnn::WorkloadInfo info;
1704 armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
1705 armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
1706 armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
1707 armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
1708
1709 AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
1710 AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
1711 AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
1712 AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
1713
1714 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1715 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1716 data.m_Mean = &meanTensor;
1717 data.m_Variance = &varianceTensor;
1718 data.m_Beta = &betaTensor;
1719 data.m_Gamma = &gammaTensor;
1720 data.m_Parameters.m_Eps = 0.01f;
1721
1722 armnn::BatchNormalizationQueueDescriptor refData = data;
1723 armnn::WorkloadInfo refInfo = info;
1724 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
1725 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1726
1727 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
1728 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);
1729
1730 inputHandle->Allocate();
1731 outputHandle->Allocate();
1732 inputHandleRef->Allocate();
1733 outputHandleRef->Allocate();
1734
1735 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
1736 CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
1737
surmeh013537c2c2018-05-18 16:31:43 +01001738 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001739 workload->Execute();
surmeh013537c2c2018-05-18 16:31:43 +01001740 refWorkloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00001741 workloadRef->Execute();
1742
1743 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1744 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
1745
1746 return ret;
1747}
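
// For reference, the values compared above come from the usual per-channel batch
// normalization formula, out = gamma * (x - mean) / sqrt(variance + eps) + beta,
// with m_Eps set to 0.01f in the descriptor.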
1748
surmeh013537c2c2018-05-18 16:31:43 +01001749template<typename T>
1750void PermuteTensorData(
1751 armnn::IWorkloadFactory& workloadFactory,
1752 const armnn::PermutationVector& mappings,
1753 armnn::TensorInfo & inputTensorInfo,
1754 const T * inputData,
1755 std::vector<T>& outputData)
telsoa014fcda012018-03-09 14:13:49 +00001756{
surmeh013537c2c2018-05-18 16:31:43 +01001757 BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
1758 if (inputData == nullptr)
1759 {
1760 // Nullptr is an error in the test. By returning without doing the permutation
1761 // I expect the caller to fail the test. It still makes sense to report this as
1762 // an assert for Debug builds.
1763 return;
1764 }
telsoa014fcda012018-03-09 14:13:49 +00001765
surmeh013537c2c2018-05-18 16:31:43 +01001766 armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
1767
1768 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
1769 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1770
1771 armnn::PermuteQueueDescriptor queueDescriptor;
1772 queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
1773 armnn::WorkloadInfo workloadInfo;
1774 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
1775 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
1776
1777 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
1778
1779 inputHandle->Allocate();
1780 outputHandle->Allocate();
1781
1782 CopyDataToITensorHandle(inputHandle.get(), inputData);
1783
1784 workload->Execute();
1785
1786 outputData.resize(outputTensorInfo.GetNumElements());
1787 CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
1788 inputTensorInfo = outputTensorInfo;
1789}
1790
1791armnn::OriginsDescriptor CreateMergerDescriptorForConcatenation(
1792 const std::vector<armnn::TensorInfo> & inputTensorInfos,
1793 unsigned int concatDim)
1794{
telsoa014fcda012018-03-09 14:13:49 +00001795 std::vector<armnn::TensorShape> shapes;
1796 shapes.reserve(inputTensorInfos.size());
1797 for (const armnn::TensorInfo& it: inputTensorInfos)
1798 {
1799 shapes.push_back(it.GetShape());
1800 }
surmeh013537c2c2018-05-18 16:31:43 +01001801
1802 return armnn::CreateMergerDescriptorForConcatenation(shapes.begin(),
1803 shapes.end(),
1804 concatDim);
1805}
1806
1807//
1808// Concatenation is only supported for the N and C dimensions for NCHW. In the case
telsoa01c577f2c2018-08-31 09:22:23 +01001809// of fewer than 4 dimensions we need to make sure that the concat dimension is at
surmeh013537c2c2018-05-18 16:31:43 +01001810// least the 3rd slowest iterating one.
1811//
1812
1813bool NeedPermuteForConcat(
1814 const std::vector<armnn::TensorInfo> & inputTensorInfos,
1815 unsigned int concatDim)
1816{
1817 // See note above. Additionally we expect the input shapes to have the
1818 // same number of dimensions.
1819 unsigned int nDimensions = 0;
1820
telsoa01c577f2c2018-08-31 09:22:23 +01001821 // Determine the number of dimensions, and sanity-check them to guard
1822 // against test implementation issues.
surmeh013537c2c2018-05-18 16:31:43 +01001823 for (auto && tensorInfo : inputTensorInfos)
1824 {
1825 if (!nDimensions)
1826 {
1827 nDimensions = tensorInfo.GetShape().GetNumDimensions();
1828 }
1829 else
1830 {
1831 BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
1832 "Input shapes must have the same number of dimensions");
1833 }
1834 }
1835
1836 return (nDimensions-concatDim) < 3;
1837}
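
// Worked examples of the rule above for NCHW: concatenating 4d tensors along N
// (dim 0) gives 4 - 0 = 4 and along C (dim 1) gives 4 - 1 = 3, so no permute is
// needed; along H (dim 2) it gives 4 - 2 = 2 < 3, so a permute is required. Any
// 1d or 2d concatenation always needs the permute-and-expand treatment.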
1838
1839armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
1840{
1841 unsigned int numDims = inputShape.GetNumDimensions();
1842 if (numDims >= 3)
1843 {
1844 // Nothing to do if the inputShape has at least 3 dimensions.
1845 return inputShape;
1846 }
1847
1848 std::vector<unsigned int> newDims(size_t(3), 1u);
1849 unsigned int expandedBy = 3 - numDims;
1850 for (unsigned int i=0; i<numDims; ++i)
1851 {
1852 newDims[expandedBy+i] = inputShape[i];
1853 }
1854 return armnn::TensorShape(3u, &newDims[0]);
1855}
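
// For example, a 1d shape { 5 } becomes { 1, 1, 5 } and a 2d shape { 2, 3 } becomes
// { 1, 2, 3 }: the dummy dimensions of size 1 are prepended, so the original
// dimensions keep their relative order as the fastest-iterating ones.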
1856
1857void Generate3dPermuteVectorForConcat(
1858 unsigned int numDimensions,
1859 unsigned int & concatDim,
1860 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
1861{
1862 BOOST_ASSERT_MSG(numDimensions <= 3,
1863 "Only dimensions 1,2 and 3 are supported by this helper");
1864
1865 unsigned int expandedBy = 3 - numDimensions;
1866 unsigned int expandedConcatAxis = concatDim + expandedBy;
1867
1868 if (expandedConcatAxis == 2)
1869 {
1870 concatDim = 0;
1871 armnn::PermutationVector forwardPermutation({1, 2, 0});
1872 armnn::PermutationVector reversePermutation({2, 0, 1});
1873 permutations = std::make_pair(forwardPermutation, reversePermutation);
1874 }
1875 else if (expandedConcatAxis == 1)
1876 {
1877 concatDim = 0;
1878 armnn::PermutationVector forwardPermutation({2, 0, 1});
1879 armnn::PermutationVector reversePermutation({1, 2, 0});
1880 permutations = std::make_pair(forwardPermutation, reversePermutation);
1881 }
1882 else
1883 {
1884 BOOST_ASSERT(expandedConcatAxis == 0);
1885 concatDim = 0;
1886 }
1887}
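
// For example, a 1d concatenation along dim 0 expands to axis 2 of the padded
// { 1, 1, N } shape, so the forward vector { 1, 2, 0 } moves that axis into
// position 0 (where concatenation is supported) and the reverse vector { 2, 0, 1 }
// restores the original layout once the merger has run.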
1888
1889//
1890// Permute the input tensors so we can do a supported concatenation.
1891// Tensors with fewer than 3 dimensions are treated as 3d by adding dummy
1892// dimensions of size 1 at the front. Finally, this function reports what the
1893// output shape of the permuted, concatenated tensor is going to be.
1894//
1895template <typename T>
1896void PermuteInputsForConcat(
1897 armnn::IWorkloadFactory& workloadFactory,
1898 std::vector<armnn::TensorInfo> & inputTensorInfos,
1899 std::vector<T *> & inputData,
1900 std::vector<std::vector<T>> & inputDataStorage,
1901 armnn::PermutationVector & permuteVector,
1902 unsigned int & concatDim,
1903 armnn::TensorInfo & outputTensorInfo)
1904{
1905 BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
1906 "Expecting more than one tensor to be concatenated here");
1907
1908 unsigned int numDims = 0;
1909 unsigned int nthInput = 0;
1910 const armnn::PermutationVector identity({0, 1, 2});
1911
1912 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
1913 std::make_pair(identity, identity);
1914
1915 inputDataStorage.resize(inputData.size());
1916
1917 for (auto && tensorInfo : inputTensorInfos)
1918 {
1919 if (numDims == 0)
1920 {
1921 numDims = tensorInfo.GetShape().GetNumDimensions();
1922 Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);
telsoa01c577f2c2018-08-31 09:22:23 +01001923 // Store the reverse permutation.
surmeh013537c2c2018-05-18 16:31:43 +01001924 permuteVector = permutations.second;
1925 BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
1926 "Test logic error, we don't need permutation, so we shouldn't arrive here");
1927 }
1928 else
1929 {
1930 BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
1931 "All inputs must have the same number of dimensions");
1932 }
1933
1934 armnn::TensorInfo newTensorInfo = tensorInfo;
1935 newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));
1936
1937 PermuteTensorData<T>(workloadFactory,
1938 permutations.first,
1939 newTensorInfo,
1940 inputData[nthInput],
1941 inputDataStorage[nthInput]);
1942
1943 inputData[nthInput] = inputDataStorage[nthInput].data();
1944 inputTensorInfos[nthInput] = newTensorInfo;
1945
1946 ++nthInput;
1947 }
1948
1949 outputTensorInfo.SetShape(
1950 armnnUtils::Permuted(
1951 ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
1952 permutations.first));
1953}
1954
1955
1956//
1957// This is the counterpart of PermuteInputsForConcat(...), which permutes back
telsoa01c577f2c2018-08-31 09:22:23 +01001958// the output of the concatenation so we can check it against an expected
surmeh013537c2c2018-05-18 16:31:43 +01001959// output.
1960//
1961template <typename T>
1962void PermuteOutputForConcat(
1963 armnn::IWorkloadFactory& workloadFactory,
1964 const armnn::TensorInfo & tensorInfo,
1965 const armnn::PermutationVector & permuteVector,
1966 std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
1967 T * data)
1968{
1969 BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
1970 if (data == nullptr)
1971 {
1972 // Nullptr is an error in the test. By returning without doing the permutation
1973 // I expect the caller to fail the test. It still makes sense to report this as
1974 // an assert for Debug builds.
1975 return;
1976 }
1977
1978 armnn::TensorInfo resultTensorInfo = tensorInfo;
1979 std::vector<T> inputData(tensorInfo.GetNumElements());
1980 std::vector<T> outputData;
1981
1982 CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
1983
1984 PermuteTensorData<T>(workloadFactory,
1985 permuteVector,
1986 resultTensorInfo,
1987 &inputData[0],
1988 outputData);
1989
1990 ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
1991}
1992
1993template <typename T>
1994void Concatenate(armnn::IWorkloadFactory& workloadFactory,
1995 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
1996 std::initializer_list<T *> inputsOrig,
1997 const armnn::TensorInfo& outputTensorInfoOrig,
1998 T * output,
1999 unsigned int concatDim)
2000{
2001 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
2002 if (output == nullptr)
2003 {
2004 // Nullptr is an error in the test. By returning without doing the concatenation
2005 // I expect the caller to fail the test. It still makes sense to report this as
2006 // an assert for Debug builds.
2007 return;
2008 }
2009
2010 armnn::MergerQueueDescriptor queueDescriptor;
2011
telsoa01c577f2c2018-08-31 09:22:23 +01002012 // Save a copy of the parameters, as we might need to change them.
surmeh013537c2c2018-05-18 16:31:43 +01002013 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
2014 std::vector<T *> inputs = inputsOrig;
2015 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
2016
2017 armnn::PermutationVector permuteVector{0, 1, 2};
2018
telsoa01c577f2c2018-08-31 09:22:23 +01002019 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01002020 std::vector<std::vector<T>> tmpInputDataStorage;
2021
2022 const size_t inputCount = inputTensorInfos.size();
2023
2024 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
2025
2026 if (needPermuteForConcat)
2027 {
2028 //
2029 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01002030 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01002031 //
2032 PermuteInputsForConcat<T>(workloadFactory,
2033 inputTensorInfos,
2034 inputs,
2035 tmpInputDataStorage,
2036 permuteVector,
2037 concatDim,
2038 outputTensorInfo);
2039 }
2040
2041 armnn::OriginsDescriptor viewsDescriptor = CreateMergerDescriptorForConcatenation(inputTensorInfos, concatDim);
telsoa014fcda012018-03-09 14:13:49 +00002042
2043 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
2044 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
2045 {
2046 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
2047 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
2048 }
2049
telsoa014fcda012018-03-09 14:13:49 +00002050 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2051
2052 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
2053 inputHandles.reserve(inputCount);
2054
2055 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2056 for (unsigned int i = 0; i < inputCount; ++i)
2057 {
surmeh013537c2c2018-05-18 16:31:43 +01002058 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
telsoa014fcda012018-03-09 14:13:49 +00002059
2060 std::unique_ptr<armnn::ITensorHandle> inputHandle = subTensorsSupported ?
2061 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo.GetShape(),
2062 queueDescriptor.m_ViewOrigins[i].m_Origin.data())
2063 : workloadFactory.CreateTensorHandle(inputTensorInfo);
2064
2065 inputHandles.emplace_back(std::move(inputHandle));
2066 }
2067
2068 armnn::WorkloadInfo workloadInfo;
2069
2070 for (unsigned int i = 0; i < inputCount; ++i)
2071 {
surmeh013537c2c2018-05-18 16:31:43 +01002072 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00002073 }
2074
2075 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
2076
2077 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(queueDescriptor, workloadInfo);
2078
2079 for (auto& inputHandle : inputHandles)
2080 {
2081 inputHandle->Allocate();
2082 }
2083
2084 outputHandle->Allocate();
2085
2086 unsigned int nextInputId = 0;
2087 for (auto& inputHandle : inputHandles)
2088 {
surmeh013537c2c2018-05-18 16:31:43 +01002089 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
2090 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00002091 }
2092
surmeh013537c2c2018-05-18 16:31:43 +01002093 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00002094 workload->Execute();
2095
surmeh013537c2c2018-05-18 16:31:43 +01002096 if (needPermuteForConcat)
2097 {
2098 PermuteOutputForConcat<T>(workloadFactory,
2099 outputTensorInfo,
2100 permuteVector,
2101 std::move(outputHandle),
2102 output);
2103 }
2104 else
2105 {
2106 CopyDataFromITensorHandle(output, outputHandle.get());
2107 }
telsoa014fcda012018-03-09 14:13:49 +00002108}
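
// Typical use of Concatenate<T> (see the test implementations below): pass one
// TensorInfo and one raw data pointer per input, the already-computed output
// TensorInfo, an output buffer of outputTensorInfo.GetNumElements() elements, and
// the dimension to concatenate along.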
2109
2110template <typename T>
2111LayerTestResult<T, 1> Concatenation1dTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
2112{
2113 armnn::TensorInfo inputTensorInfo({ 3 }, armnn::GetDataType<T>());
2114
2115 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
2116 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
2117 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
2118
2119 armnn::TensorInfo outputTensorInfo({ 9 }, armnn::GetDataType<T>());
2120
2121 LayerTestResult<T, 1> result(outputTensorInfo);
2122
2123 std::vector<T> output;
2124 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002125 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002126 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2127 { input0.data(), input1.data(), input2.data() },
2128 outputTensorInfo,
2129 output.data(),
2130 0);
2131
2132 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
2133 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2134 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
2135 }));
2136
2137 return result;
2138}
2139
2140LayerTestResult<float, 1> Concatenation1dTest(armnn::IWorkloadFactory& workloadFactory)
2141{
2142 return Concatenation1dTestImpl<float>(workloadFactory, 0.0f, 0);
2143}
2144
2145template <typename T>
2146LayerTestResult<T, 2> Concatenation2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
2147 const armnn::TensorInfo& outputTensorInfo,
2148 unsigned int dimension,
2149 const float qScale,
2150 const int32_t qOffset)
2151{
2152 armnn::TensorInfo inputTensorInfo({ 2, 3 }, armnn::GetDataType<T>());
2153
2154 auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2155 // Batch 0
2156 1.0f, 2.0f, 3.0f,
2157
2158 // Batch 1
2159 10.0f, 11.0f, 12.0f,
2160 }));
2161
2162 auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2163 // Batch 0
2164 4.0f, 5.0f, 6.0f,
2165
2166 // Batch 1
2167 13.0f, 14.0f, 15.0f,
2168 }));
2169
2170 auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2171 // Batch 0
2172 7.0f, 8.0f, 9.0f,
2173
2174 // Batch 1
2175 16.0f, 17.0f, 18.0f,
2176 }));
2177
2178 LayerTestResult<T, 2> result(outputTensorInfo);
2179
2180 std::vector<T> output;
2181 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002182 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002183 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2184 { input0.data(), input1.data(), input2.data() },
2185 outputTensorInfo,
2186 output.data(),
2187 dimension);
2188
2189 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
2190 return result;
2191}
2192
2193template <typename T>
2194LayerTestResult<T, 2> Concatenation2dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory,
2195 float qScale, int32_t qOffset)
2196{
2197 armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
2198
2199 LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 0, qScale, qOffset);
2200 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2201 // Batch 0
2202 1.0f, 2.0f, 3.0f,
2203
2204 // Batch 1
2205 10.0f, 11.0f, 12.0f,
2206
2207 // Batch 2
2208 4.0f, 5.0f, 6.0f,
2209
2210 // Batch 3
2211 13.0f, 14.0f, 15.0f,
2212
2213 // Batch 4
2214 7.0f, 8.0f, 9.0f,
2215
2216 // Batch 5
2217 16.0f, 17.0f, 18.0f,
2218 }));
2219
2220 return result;
2221}
2222
2223LayerTestResult<float, 2> Concatenation2dDim0Test(armnn::IWorkloadFactory& workloadFactory)
2224{
2225 return Concatenation2dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
2226}
2227
2228template <typename T>
2229LayerTestResult<T, 2> Concatenation2dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
2230 float qScale, int32_t qOffset)
2231{
2232 armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
2233
2234 LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
2235 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2236 // Batch 0
2237 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
2238
2239 // Batch 1
2240 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
2241 }));
2242
2243 return result;
2244}
2245
2246LayerTestResult<float, 2> Concatenation2dDim1Test(armnn::IWorkloadFactory& workloadFactory)
2247{
2248 return Concatenation2dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
2249}
2250
2251template <typename T>
2252LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2253 int32_t qOffset)
2254{
2255 armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
2256 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2257 // Batch 0
2258 1.0f, 2.0f, 3.0f,
2259
2260 // Batch 1
2261 10.0f, 11.0f, 12.0f,
2262 }));
2263
2264 armnn::TensorInfo input1TensorInfo({ 3, 3 }, armnn::GetDataType<T>());
2265 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2266 // Batch 0
2267 4.0f, 5.0f, 6.0f,
2268
2269 // Batch 1
2270 13.0f, 14.0f, 15.0f,
2271
2272 // Batch 2
2273 7.0f, 8.0f, 9.0f,
2274 }));
2275
2276 armnn::TensorInfo input2TensorInfo({ 1, 3 }, armnn::GetDataType<T>());
2277 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2278 // Batch 0
2279 16.0f, 17.0f, 18.0f,
2280 }));
2281
2282 armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
2283 LayerTestResult<T, 2> result(outputTensorInfo);
2284
2285 std::vector<T> output;
2286 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002287 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002288 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2289 { input0.data(), input1.data(), input2.data() },
2290 outputTensorInfo,
2291 output.data(),
2292 0);
2293
2294 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
2295 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2296 // Batch 0
2297 1.0f, 2.0f, 3.0f,
2298
2299 // Batch 1
2300 10.0f, 11.0f, 12.0f,
2301
2302 // Batch 2
2303 4.0f, 5.0f, 6.0f,
2304
2305 // Batch 3
2306 13.0f, 14.0f, 15.0f,
2307
2308 // Batch 4
2309 7.0f, 8.0f, 9.0f,
2310
2311 // Batch 5
2312 16.0f, 17.0f, 18.0f,
2313 }));
2314
2315 return result;
2316}
2317
2318LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2319{
2320 return Concatenation2dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2321}
2322
2323template <typename T>
2324LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2325 int32_t qOffset)
2326{
2327 armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
2328 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2329 // Batch 0
2330 1.0f, 2.0f, 3.0f,
2331
2332 // Batch 1
2333 10.0f, 11.0f, 12.0f,
2334 }));
2335
2336 armnn::TensorInfo input1TensorInfo({ 2, 5 }, armnn::GetDataType<T>());
2337 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2338 // Batch 0
2339 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
2340
2341 // Batch 1
2342 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
2343 }));
2344
2345 armnn::TensorInfo input2TensorInfo({ 2, 1 }, armnn::GetDataType<T>());
2346 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2347 // Batch 0
2348 9.0f,
2349
2350 // Batch 1
2351 18.0f
2352 }));
2353
2354 armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
2355 LayerTestResult<T, 2> result(outputTensorInfo);
2356
2357 std::vector<T> output;
2358 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002359 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002360 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2361 { input0.data(), input1.data(), input2.data() },
2362 outputTensorInfo,
2363 output.data(),
2364 1);
2365
2366 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
2367 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2368 // Batch 0
2369 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
2370
2371 // Batch 1
2372 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
2373 }));
2374
2375 return result;
2376}
2377
2378LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2379{
2380 return Concatenation2dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2381}
2382
2383template <typename T>
2384LayerTestResult<T, 3> Concatenation3dTestImpl(armnn::IWorkloadFactory& workloadFactory,
2385 const armnn::TensorInfo& outputTensorInfo,
2386 unsigned int dimension,
2387 float qScale,
2388 int32_t qOffset)
2389{
2390 armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2391
2392 auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2393 // Batch 0, Channel 0
2394 1.0f, 2.0f,
2395
2396 // Batch 0, Channel 1
2397 3.0f, 4.0f,
2398
2399 // Batch 0, Channel 2
2400 5.0f, 6.0f,
2401
2402 // Batch 1, Channel 0
2403 19.0f, 20.0f,
2404
2405 // Batch 1, Channel 1
2406 21.0f, 22.0f,
2407
2408 // Batch 1, Channel 2
2409 23.0f, 24.0f
2410 }));
2411
2412 auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2413 // Batch 0, Channel 0
2414 7.0f, 8.0f,
2415
2416 // Batch 0, Channel 1
2417 9.0f, 10.0f,
2418
2419 // Batch 0, Channel 2
2420 11.0f, 12.0f,
2421
2422 // Batch 1, Channel 0
2423 25.0f, 26.0f,
2424
2425 // Batch 1, Channel 1
2426 27.0f, 28.0f,
2427
2428 // Batch 1, Channel 2
2429 29.0f, 30.0f
2430 }));
2431
2432 auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2433 // Batch 0, Channel 0
2434 13.0f, 14.0f,
2435
2436 // Batch 0, Channel 1
2437 15.0f, 16.0f,
2438
2439 // Batch 0, Channel 2
2440 17.0f, 18.0f,
2441
2442 // Batch 1, Channel 0
2443 31.0f, 32.0f,
2444
2445 // Batch 1, Channel 1
2446 33.0f, 34.0f,
2447
2448 // Batch 1, Channel 2
2449 35.0f, 36.0f
2450 }));
2451
2452 LayerTestResult<T, 3> result(outputTensorInfo);
2453
2454 std::vector<T> output;
2455 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002456 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002457 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2458 { input0.data(), input1.data(), input2.data() },
2459 outputTensorInfo,
2460 output.data(),
2461 dimension);
2462
2463 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
2464 return result;
2465}
2466
2467template <typename T>
2468LayerTestResult<T, 3> Concatenation3dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2469 int32_t qOffset)
2470{
2471 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
2472
2473 LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 0,
2474 qScale, qOffset);
2475 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2476 // Batch 0, Channel 0
2477 1.0f, 2.0f,
2478
2479 // Batch 0, Channel 1
2480 3.0f, 4.0f,
2481
2482 // Batch 0, Channel 2
2483 5.0f, 6.0f,
2484
2485 // Batch 1, Channel 0
2486 19.0f, 20.0f,
2487
2488 // Batch 1, Channel 1
2489 21.0f, 22.0f,
2490
2491 // Batch 1, Channel 2
2492 23.0f, 24.0f,
2493
2494 // Batch 2, Channel 0
2495 7.0f, 8.0f,
2496
2497 // Batch 2, Channel 1
2498 9.0f, 10.0f,
2499
2500 // Batch 2, Channel 2
2501 11.0f, 12.0f,
2502
2503 // Batch 3, Channel 0
2504 25.0f, 26.0f,
2505
2506 // Batch 3, Channel 1
2507 27.0f, 28.0f,
2508
2509 // Batch 3, Channel 2
2510 29.0f, 30.0f,
2511
2512 // Batch 4, Channel 0
2513 13.0f, 14.0f,
2514
2515 // Batch 4, Channel 1
2516 15.0f, 16.0f,
2517
2518 // Batch 4, Channel 2
2519 17.0f, 18.0f,
2520
2521 // Batch 5, Channel 0
2522 31.0f, 32.0f,
2523
2524 // Batch 5, Channel 1
2525 33.0f, 34.0f,
2526
2527 // Batch 5, Channel 2
2528 35.0f, 36.0f
2529 }));
2530 return result;
2531}
2532
2533LayerTestResult<float, 3> Concatenation3dDim0Test(armnn::IWorkloadFactory& workloadFactory)
2534{
2535 return Concatenation3dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
2536}
2537
2538template <typename T>
2539LayerTestResult<T, 3> Concatenation3dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
2540 float qScale, int32_t qOffset)
2541{
2542 armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, armnn::GetDataType<T>());
2543
2544 LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
2545 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2546 // Batch 0, Channel 0
2547 1.0f, 2.0f,
2548
2549 // Batch 0, Channel 1
2550 3.0f, 4.0f,
2551
2552 // Batch 0, Channel 2
2553 5.0f, 6.0f,
2554
2555 // Batch 0, Channel 3
2556 7.0f, 8.0f,
2557
2558 // Batch 0, Channel 4
2559 9.0f, 10.0f,
2560
2561 // Batch 0, Channel 5
2562 11.0f, 12.0f,
2563
2564 // Batch 0, Channel 6
2565 13.0f, 14.0f,
2566
2567 // Batch 0, Channel 7
2568 15.0f, 16.0f,
2569
2570 // Batch 0, Channel 8
2571 17.0f, 18.0f,
2572
2573 // Batch 1, Channel 0
2574 19.0f, 20.0f,
2575
2576 // Batch 1, Channel 1
2577 21.0f, 22.0f,
2578
2579 // Batch 1, Channel 2
2580 23.0f, 24.0f,
2581
2582 // Batch 1, Channel 3
2583 25.0f, 26.0f,
2584
2585 // Batch 1, Channel 4
2586 27.0f, 28.0f,
2587
2588 // Batch 1, Channel 5
2589 29.0f, 30.0f,
2590
2591 // Batch 1, Channel 6
2592 31.0f, 32.0f,
2593
2594 // Batch 1, Channel 7
2595 33.0f, 34.0f,
2596
2597 // Batch 1, Channel 8
2598 35.0f, 36.0f
2599 }));
2600
2601 return result;
2602}
2603
2604LayerTestResult<float, 3> Concatenation3dDim1Test(armnn::IWorkloadFactory& workloadFactory)
2605{
2606 return Concatenation3dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
2607}
2608
2609template <typename T>
2610LayerTestResult<T, 3> Concatenation3dDim2TestImpl(armnn::IWorkloadFactory& workloadFactory,
2611 float qScale, int32_t qOffset)
2612{
2613 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
2614
2615 LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 2, qScale, qOffset);
2616 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2617 // Batch 0, Channel 0
2618 1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
2619
2620 // Batch 0, Channel 1
2621 3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
2622
2623 // Batch 0, Channel 2
2624 5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
2625
2626 // Batch 1, Channel 0
2627 19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
2628
2629 // Batch 1, Channel 1
2630 21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
2631
2632 // Batch 1, Channel 2
2633 23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
2634 }));
2635
2636 return result;
2637}
2638
2639LayerTestResult<float, 3> Concatenation3dDim2Test(armnn::IWorkloadFactory& workloadFactory)
2640{
2641 return Concatenation3dDim2TestImpl<float>(workloadFactory, 0.0f, 0);
2642}
2643
2644template <typename T>
2645LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2646 int32_t qOffset)
2647{
2648 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2649 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2650 // Batch 0, Channel 0
2651 1.0f, 2.0f,
2652
2653 // Batch 0, Channel 1
2654 3.0f, 4.0f,
2655
2656 // Batch 0, Channel 2
2657 5.0f, 6.0f,
2658
2659 // Batch 1, Channel 0
2660 19.0f, 20.0f,
2661
2662 // Batch 1, Channel 1
2663 21.0f, 22.0f,
2664
2665 // Batch 1, Channel 2
2666 23.0f, 24.0f
2667 }));
2668
2669 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, armnn::GetDataType<T>());
2670 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2671 // Batch 0, Channel 0
2672 7.0f, 8.0f,
2673
2674 // Batch 0, Channel 1
2675 9.0f, 10.0f,
2676
2677 // Batch 0, Channel 2
2678 11.0f, 12.0f,
2679 }));
2680
2681 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, armnn::GetDataType<T>());
2682 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2683 // Batch 0, Channel 0
2684 25.0f, 26.0f,
2685
2686 // Batch 0, Channel 1
2687 27.0f, 28.0f,
2688
2689 // Batch 0, Channel 2
2690 29.0f, 30.0f,
2691
2692 // Batch 1, Channel 0
2693 13.0f, 14.0f,
2694
2695 // Batch 1, Channel 1
2696 15.0f, 16.0f,
2697
2698 // Batch 1, Channel 2
2699 17.0f, 18.0f,
2700
2701 // Batch 2, Channel 0
2702 31.0f, 32.0f,
2703
2704 // Batch 2, Channel 1
2705 33.0f, 34.0f,
2706
2707 // Batch 2, Channel 2
2708 35.0f, 36.0f
2709 }));
2710
2711 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
2712 LayerTestResult<T, 3> result(outputTensorInfo);
2713
2714 std::vector<T> output;
2715 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002716 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002717 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2718 { input0.data(), input1.data(), input2.data() },
2719 outputTensorInfo,
2720 output.data(),
2721 0);
2722
2723 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
2724 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2725 // Batch 0, Channel 0
2726 1.0f, 2.0f,
2727
2728 // Batch 0, Channel 1
2729 3.0f, 4.0f,
2730
2731 // Batch 0, Channel 2
2732 5.0f, 6.0f,
2733
2734 // Batch 1, Channel 0
2735 19.0f, 20.0f,
2736
2737 // Batch 1, Channel 1
2738 21.0f, 22.0f,
2739
2740 // Batch 1, Channel 2
2741 23.0f, 24.0f,
2742
2743 // Batch 2, Channel 0
2744 7.0f, 8.0f,
2745
2746 // Batch 2, Channel 1
2747 9.0f, 10.0f,
2748
2749 // Batch 2, Channel 2
2750 11.0f, 12.0f,
2751
2752 // Batch 3, Channel 0
2753 25.0f, 26.0f,
2754
2755 // Batch 3, Channel 1
2756 27.0f, 28.0f,
2757
2758 // Batch 3, Channel 2
2759 29.0f, 30.0f,
2760
2761 // Batch 4, Channel 0
2762 13.0f, 14.0f,
2763
2764 // Batch 4, Channel 1
2765 15.0f, 16.0f,
2766
2767 // Batch 4, Channel 2
2768 17.0f, 18.0f,
2769
2770 // Batch 5, Channel 0
2771 31.0f, 32.0f,
2772
2773 // Batch 5, Channel 1
2774 33.0f, 34.0f,
2775
2776 // Batch 5, Channel 2
2777 35.0f, 36.0f
2778 }));
2779
2780 return result;
2781}
2782
2783LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2784{
2785 return Concatenation3dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2786}
2787
2788template <typename T>
2789LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2790 int32_t qOffset)
2791{
2792 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2793 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2794 // Batch 0, Channel 0
2795 1.0f, 2.0f,
2796
2797 // Batch 0, Channel 1
2798 3.0f, 4.0f,
2799
2800 // Batch 0, Channel 2
2801 5.0f, 6.0f,
2802
2803 // Batch 1, Channel 0
2804 19.0f, 20.0f,
2805
2806 // Batch 1, Channel 1
2807 21.0f, 22.0f,
2808
2809 // Batch 1, Channel 2
2810 23.0f, 24.0f
2811 }));
2812
2813 armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, armnn::GetDataType<T>());
2814 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2815 // Batch 0, Channel 0
2816 7.0f, 8.0f,
2817
2818 // Batch 0, Channel 1
2819 9.0f, 10.0f,
2820
2821 // Batch 0, Channel 2
2822 11.0f, 12.0f,
2823
2824 // Batch 0, Channel 3
2825 25.0f, 26.0f,
2826
2827 // Batch 1, Channel 0
2828 27.0f, 28.0f,
2829
2830 // Batch 1, Channel 1
2831 29.0f, 30.0f,
2832
2833 // Batch 1, Channel 2
2834 13.0f, 14.0f,
2835
2836 // Batch 1, Channel 3
2837 15.0f, 16.0f,
2838 }));
2839
2840 armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, armnn::GetDataType<T>());
2841 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2842 // Batch 0, Channel 0
2843 17.0f, 18.0f,
2844
2845 // Batch 1, Channel 0
2846 31.0f, 32.0f,
2847 }));
2848
2849 armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, armnn::GetDataType<T>());
2850 LayerTestResult<T, 3> result(outputTensorInfo);
2851
2852 std::vector<T> output;
2853 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002854 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002855 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2856 { input0.data(), input1.data(), input2.data() },
2857 outputTensorInfo,
2858 output.data(),
2859 1);
2860
2861 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
2862 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2863 // Batch 0, Channel 0
2864 1.0f, 2.0f,
2865
2866 // Batch 0, Channel 1
2867 3.0f, 4.0f,
2868
2869 // Batch 0, Channel 2
2870 5.0f, 6.0f,
2871
2872 // Batch 0, Channel 3
2873 7.0f, 8.0f,
2874
2875 // Batch 0, Channel 4
2876 9.0f, 10.0f,
2877
2878 // Batch 0, Channel 5
2879 11.0f, 12.0f,
2880
2881 // Batch 0, Channel 6
2882 25.0f, 26.0f,
2883
2884 // Batch 0, Channel 7
2885 17.0f, 18.0f,
2886
2887 // Batch 1, Channel 0
2888 19.0f, 20.0f,
2889
2890 // Batch 1, Channel 1
2891 21.0f, 22.0f,
2892
2893 // Batch 1, Channel 2
2894 23.0f, 24.0f,
2895
2896 // Batch 1, Channel 3
2897 27.0f, 28.0f,
2898
2899 // Batch 1, Channel 4
2900 29.0f, 30.0f,
2901
2902 // Batch 1, Channel 5
2903 13.0f, 14.0f,
2904
2905 // Batch 1, Channel 6
2906 15.0f, 16.0f,
2907
2908 // Batch 1, Channel 7
2909 31.0f, 32.0f,
2910 }));
2911
2912 return result;
2913}
2914
2915LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
2916{
2917 return Concatenation3dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
2918}
2919
2920template <typename T>
2921LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
2922 int32_t qOffset)
2923{
2924 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
2925 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2926 // Batch 0, Channel 0
2927 1.0f, 2.0f,
2928
2929 // Batch 0, Channel 1
2930 3.0f, 4.0f,
2931
2932 // Batch 0, Channel 2
2933 5.0f, 6.0f,
2934
2935 // Batch 1, Channel 0
2936 19.0f, 20.0f,
2937
2938 // Batch 1, Channel 1
2939 21.0f, 22.0f,
2940
2941 // Batch 1, Channel 2
2942 23.0f, 24.0f
2943 }));
2944
2945 armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, armnn::GetDataType<T>());
2946 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2947 // Batch 0, Channel 0
2948 7.0f,
2949
2950 // Batch 0, Channel 1
2951 9.0f,
2952
2953 // Batch 0, Channel 2
2954 11.0f,
2955
2956 // Batch 1, Channel 0
2957 25.0f,
2958
2959 // Batch 1, Channel 1
2960 27.0f,
2961
2962 // Batch 1, Channel 2
2963 29.0f
2964 }));
2965
2966 armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, armnn::GetDataType<T>());
2967 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
2968 // Batch 0, Channel 0
2969 13.0f, 14.0f, 50.0f,
2970
2971 // Batch 0, Channel 1
2972 15.0f, 16.0f, 51.0f,
2973
2974 // Batch 0, Channel 2
2975 17.0f, 18.0f, 52.0f,
2976
2977 // Batch 1, Channel 0
2978 31.0f, 32.0f, 53.0f,
2979
2980 // Batch 1, Channel 1
2981 33.0f, 34.0f, 54.0f,
2982
2983 // Batch 1, Channel 2
2984 35.0f, 36.0f, 55.0f,
2985 }));
2986
2987 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
2988 LayerTestResult<T, 3> result(outputTensorInfo);
2989
2990 std::vector<T> output;
2991 output.resize(outputTensorInfo.GetNumElements());
surmeh013537c2c2018-05-18 16:31:43 +01002992 Concatenate<T>(workloadFactory,
telsoa014fcda012018-03-09 14:13:49 +00002993 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
2994 { input0.data(), input1.data(), input2.data() },
2995 outputTensorInfo,
2996 output.data(),
2997 2);
2998
2999 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
3000 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3001 // Batch 0, Channel 0
3002 1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
3003
3004 // Batch 0, Channel 1
3005 3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
3006
3007 // Batch 0, Channel 2
3008 5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
3009
3010 // Batch 1, Channel 0
3011 19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
3012
3013 // Batch 1, Channel 1
3014 21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
3015
3016 // Batch 1, Channel 2
3017 23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
3018 }));
3019
3020 return result;
3021}
3022
3023LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
3024{
3025 return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
3026}
3027
James Conroy074f3712018-10-03 09:32:03 +01003028LayerTestResult<float, 4> ResizeBilinearNopTestImpl(armnn::IWorkloadFactory& workloadFactory,
3029 const armnn::TensorShape& inputOutputTensorShape,
3030 armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00003031{
James Conroy074f3712018-10-03 09:32:03 +01003032 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
3033 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00003034
3035 auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
3036 1.0f, 2.0f, 3.0f, 4.0f,
3037 2.0f, 3.0f, 4.0f, 5.0f,
3038 3.0f, 4.0f, 5.0f, 6.0f,
3039 4.0f, 5.0f, 6.0f, 7.0f
3040 }));
3041
3042 LayerTestResult<float, 4> result(outputTensorInfo);
3043 result.outputExpected = input;
3044
3045 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3046 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3047
3048 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003049 descriptor.m_Parameters.m_DataLayout = dataLayout;
3050 armnn::WorkloadInfo info;
3051 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3052 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3053
3054 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3055
3056 inputHandle->Allocate();
3057 outputHandle->Allocate();
3058 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3059
3060 workloadFactory.Finalize();
3061 workload->Execute();
3062
3063 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3064 return result;
3065}
3066
3067LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory)
3068{
3069 // BatchSize = 1, Channels = 1, Height = 4, Width = 4
3070 const armnn::TensorShape inputOutputShape{ 1, 1, 4, 4 };
3071
3072 return ResizeBilinearNopTestImpl(workloadFactory, inputOutputShape, armnn::DataLayout::NCHW);
3073}
3074
3075LayerTestResult<float, 4> ResizeBilinearNopNhwcTest(armnn::IWorkloadFactory& workloadFactory)
3076{
3077 // BatchSize = 1, Height = 4, Width = 4, Channels = 1
3078 const armnn::TensorShape inputOutputShape{ 1, 4, 4, 1 };
3079
3080 return ResizeBilinearNopTestImpl(workloadFactory, inputOutputShape, armnn::DataLayout::NHWC);
3081}
3082
3083LayerTestResult<float, 4> SimpleResizeBilinearTestImpl(armnn::IWorkloadFactory& workloadFactory,
3084 const armnn::TensorShape& inputTensorShape,
3085 const armnn::TensorShape& outputTensorShape,
3086 armnn::DataLayout dataLayout)
3087{
3088 const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
3089 const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
3090
3091 auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
3092 1.0f, 255.0f,
3093 200.0f, 250.0f
3094 }));
3095
3096    // The 'resize bilinear' operation projects the top-left corner of each output texel into the input image,
3097    // then derives the interpolation weights from the fractional offsets of that projected point. Note this is
3098    // different to projecting the centre of the output texel - and thus we expect the output 1x1 matrix to
3099    // contain, as its single element, the value at position (0,0) of the input matrix (rather than an average,
3100    // which projecting the centre would produce). A reference sketch of this convention follows this function.
3101 LayerTestResult<float, 4> result(outputTensorInfo);
3102 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
3103 1.0f
3104 }));
3105
3106 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3107 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3108
3109 armnn::ResizeBilinearQueueDescriptor descriptor;
3110 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003111 armnn::WorkloadInfo info;
3112 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3113 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3114
3115 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3116
3117 inputHandle->Allocate();
3118 outputHandle->Allocate();
3119 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3120
surmeh013537c2c2018-05-18 16:31:43 +01003121 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00003122 workload->Execute();
3123
3124 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3125 return result;
3126}
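
// A minimal reference sketch of the top-left projection convention described in
// SimpleResizeBilinearTestImpl above, with clamped reads at the bottom/right edges.
// It is illustrative only - it is not called by the tests in this file, and the
// function and parameter names are inventions of this note rather than ArmNN API.
// For example, with a 3x5 input resized to 2x3, output texel (0,1) projects to
// input x = 1 * 5/3 ~= 1.667 and interpolates between columns 1 and 2.
namespace
{

float ReferenceResizeBilinear(const std::vector<float>& in,
                              unsigned int inW, unsigned int inH,
                              unsigned int outW, unsigned int outH,
                              unsigned int outX, unsigned int outY)
{
    // Project the top-left corner of the output texel into the input image.
    const float x = outX * static_cast<float>(inW) / static_cast<float>(outW);
    const float y = outY * static_cast<float>(inH) / static_cast<float>(outH);

    // Integer texel coordinates of the four neighbours, clamped at the edges.
    const unsigned int x0 = static_cast<unsigned int>(x);
    const unsigned int y0 = static_cast<unsigned int>(y);
    const unsigned int x1 = std::min(x0 + 1, inW - 1);
    const unsigned int y1 = std::min(y0 + 1, inH - 1);

    // Interpolation weights are the fractional parts of the projected point.
    const float xw = x - static_cast<float>(x0);
    const float yw = y - static_cast<float>(y0);

    const float top    = in[y0 * inW + x0] * (1.0f - xw) + in[y0 * inW + x1] * xw;
    const float bottom = in[y1 * inW + x0] * (1.0f - xw) + in[y1 * inW + x1] * xw;
    return top * (1.0f - yw) + bottom * yw;
}

} // anonymous namespace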
3127
3128LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory)
3129{
James Conroy074f3712018-10-03 09:32:03 +01003130 // inputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 2
3131 const armnn::TensorShape inputShape{ 1, 1, 2, 2 };
telsoa014fcda012018-03-09 14:13:49 +00003132
James Conroy074f3712018-10-03 09:32:03 +01003133 // outputShape: BatchSize = 1, Channels = 1, Height = 1, Width = 1
3134 const armnn::TensorShape outputShape{ 1, 1, 1, 1 };
telsoa014fcda012018-03-09 14:13:49 +00003135
James Conroy074f3712018-10-03 09:32:03 +01003136 return SimpleResizeBilinearTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
3137}
3138
3139LayerTestResult<float, 4> SimpleResizeBilinearNhwcTest(armnn::IWorkloadFactory& workloadFactory)
3140{
3141 // inputShape: BatchSize = 1, Height = 2, Width = 2, Channels = 1
3142 const armnn::TensorShape inputShape{ 1, 2, 2, 1 };
3143
3144 // outputShape: BatchSize = 1, Height = 1, Width = 1, Channels = 1
3145 const armnn::TensorShape outputShape{ 1, 1, 1, 1 };
3146
3147 return SimpleResizeBilinearTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
3148}
3149
3150LayerTestResult<float, 4> ResizeBilinearSqMinTestImpl(armnn::IWorkloadFactory& workloadFactory,
3151 const armnn::TensorShape& inputTensorShape,
3152 const armnn::TensorShape& outputTensorShape,
3153 armnn::DataLayout dataLayout)
3154{
3155 const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
3156 const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00003157
3158 auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
James Conroy074f3712018-10-03 09:32:03 +01003159 1.0f, 2.0f, 3.0f, 4.0f,
3160 2.0f, 3.0f, 4.0f, 5.0f,
3161 3.0f, 4.0f, 5.0f, 6.0f,
3162 4.0f, 5.0f, 6.0f, 7.0f
telsoa014fcda012018-03-09 14:13:49 +00003163 }));
3164
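    // With scale factors of inputSize / outputSize = 2 in both dimensions, the four
    // output texels project onto input positions (0,0), (0,2), (2,0) and (2,2),
    // picking out the values 1, 3, 3 and 5.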
telsoa014fcda012018-03-09 14:13:49 +00003165 LayerTestResult<float, 4> result(outputTensorInfo);
3166 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
James Conroy074f3712018-10-03 09:32:03 +01003167 1.0f, 3.0f,
3168 3.0f, 5.0f
telsoa014fcda012018-03-09 14:13:49 +00003169 }));
3170
3171 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3172 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3173
3174 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003175 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003176 armnn::WorkloadInfo info;
3177 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3178 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3179
3180 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3181
3182 inputHandle->Allocate();
3183 outputHandle->Allocate();
3184 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3185
surmeh013537c2c2018-05-18 16:31:43 +01003186 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00003187 workload->Execute();
3188
3189 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3190 return result;
3191}
3192
3193LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory)
3194{
James Conroy074f3712018-10-03 09:32:03 +01003195 // inputShape: BatchSize = 1, Channels = 1, Height = 4, Width = 4
3196 const armnn::TensorShape inputShape{ 1, 1, 4, 4 };
telsoa014fcda012018-03-09 14:13:49 +00003197
James Conroy074f3712018-10-03 09:32:03 +01003198 // outputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 2
3199 const armnn::TensorShape outputShape{ 1, 1, 2, 2 };
telsoa014fcda012018-03-09 14:13:49 +00003200
James Conroy074f3712018-10-03 09:32:03 +01003201 return ResizeBilinearSqMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
3202}
3203
3204LayerTestResult<float, 4> ResizeBilinearSqMinNhwcTest(armnn::IWorkloadFactory& workloadFactory)
3205{
3206 // inputShape: BatchSize = 1, Height = 4, Width = 4, Channels = 1
3207 const armnn::TensorShape inputShape{ 1, 4, 4, 1 };
3208
3209 // outputShape: BatchSize = 1, Height = 2, Width = 2, Channels = 1
3210 const armnn::TensorShape outputShape{ 1, 2, 2, 1 };
3211
3212 return ResizeBilinearSqMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
3213}
3214
3215LayerTestResult<float, 4> ResizeBilinearMinTestImpl(armnn::IWorkloadFactory& workloadFactory,
3216 const armnn::TensorShape& inputTensorShape,
3217 const armnn::TensorShape& outputTensorShape,
3218 armnn::DataLayout dataLayout)
3219{
3220 const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
3221 const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00003222
3223 auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
James Conroy074f3712018-10-03 09:32:03 +01003224 1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
3225 13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
3226 144.0f, 233.0f, 377.0f, 610.0f, 987.0f
telsoa014fcda012018-03-09 14:13:49 +00003227 }));
3228
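    // Worked examples for the expected values below. The scale factors are
    // inputSize / outputSize: height 3/2 = 1.5 and width 5/3 ~= 1.667. Output (0,1)
    // projects to input x = 1.667, giving 2 + 0.667 * (3 - 2) = 2.667; output (1,0)
    // projects to input y = 1.5, giving 13 + 0.5 * (144 - 13) = 78.5.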
3229 LayerTestResult<float, 4> result(outputTensorInfo);
3230 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
James Conroy074f3712018-10-03 09:32:03 +01003231 1.0f, 2.6666f, 6.0f,
3232 78.5f, 179.3333f, 401.0f
telsoa014fcda012018-03-09 14:13:49 +00003233 }));
3234
3235 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3236 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3237
3238 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003239 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003240 armnn::WorkloadInfo info;
3241 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3242 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3243
3244 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3245
3246 inputHandle->Allocate();
3247 outputHandle->Allocate();
3248 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3249
surmeh013537c2c2018-05-18 16:31:43 +01003250 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00003251 workload->Execute();
3252
3253 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3254 return result;
3255}
3256
3257LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory)
3258{
James Conroy074f3712018-10-03 09:32:03 +01003259 // inputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 5
3260 const armnn::TensorShape inputShape{ 1, 1, 3, 5 };
telsoa014fcda012018-03-09 14:13:49 +00003261
James Conroy074f3712018-10-03 09:32:03 +01003262 // outputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 3
3263 const armnn::TensorShape outputShape{ 1, 1, 2, 3 };
telsoa014fcda012018-03-09 14:13:49 +00003264
James Conroy074f3712018-10-03 09:32:03 +01003265 return ResizeBilinearMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
3266}
3267
3268LayerTestResult<float, 4> ResizeBilinearMinNhwcTest(armnn::IWorkloadFactory& workloadFactory)
3269{
3270 // inputShape: BatchSize = 1, Height = 3, Width = 5, Channels = 1
3271 const armnn::TensorShape inputShape{ 1, 3, 5, 1 };
3272
3273 // outputShape: BatchSize = 1, Height = 2, Width = 3, Channels = 1
3274 const armnn::TensorShape outputShape{ 1, 2, 3, 1 };
3275
3276 return ResizeBilinearMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
3277}
3278
3279LayerTestResult<float, 4> ResizeBilinearMagTestImpl(armnn::IWorkloadFactory& workloadFactory,
3280 const armnn::TensorShape& inputTensorShape,
3281 const armnn::TensorShape& outputTensorShape,
3282 armnn::DataLayout dataLayout)
3283{
3284 const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
3285 const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00003286
3287 auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
James Conroy074f3712018-10-03 09:32:03 +01003288 1.0f, 2.0f,
3289 13.0f, 21.0f,
3290 144.0f, 233.0f
telsoa014fcda012018-03-09 14:13:49 +00003291 }));
3292
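    // The width scale factor is inputWidth / outputWidth = 2/5 = 0.4, so the output
    // columns project to input x = 0.0, 0.4, 0.8, 1.2 and 1.6. Within the row this
    // interpolates (e.g. 1 + 0.4 * (2 - 1) = 1.4); past the last column the read is
    // clamped, so x = 1.2 and x = 1.6 both reproduce the right-hand value.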
3293 LayerTestResult<float, 4> result(outputTensorInfo);
3294 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
James Conroy074f3712018-10-03 09:32:03 +01003295 1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
3296 13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
3297 144.0f, 179.6f, 215.2f, 233.0f, 233.0f
telsoa014fcda012018-03-09 14:13:49 +00003298 }));
3299
3300 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3301 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3302
3303 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01003304 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00003305 armnn::WorkloadInfo info;
3306 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3307 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3308
3309 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3310
3311 inputHandle->Allocate();
3312 outputHandle->Allocate();
3313 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3314
surmeh013537c2c2018-05-18 16:31:43 +01003315 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00003316 workload->Execute();
3317
3318 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3319 return result;
3320}
3321
3322LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory)
3323{
James Conroy074f3712018-10-03 09:32:03 +01003324 // inputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 2
3325 const armnn::TensorShape inputShape{ 1, 1, 3, 2 };
telsoa014fcda012018-03-09 14:13:49 +00003326
James Conroy074f3712018-10-03 09:32:03 +01003327 // outputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 5
3328 const armnn::TensorShape outputShape{ 1, 1, 3, 5 };
telsoa014fcda012018-03-09 14:13:49 +00003329
James Conroy074f3712018-10-03 09:32:03 +01003330 return ResizeBilinearMagTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
3331}
telsoa014fcda012018-03-09 14:13:49 +00003332
James Conroy074f3712018-10-03 09:32:03 +01003333LayerTestResult<float, 4> ResizeBilinearMagNhwcTest(armnn::IWorkloadFactory& workloadFactory)
3334{
3335 // inputShape: BatchSize = 1, Height = 3, Width = 2, Channels = 1
3336 const armnn::TensorShape inputShape{ 1, 3, 2, 1 };
telsoa014fcda012018-03-09 14:13:49 +00003337
James Conroy074f3712018-10-03 09:32:03 +01003338 // outputShape: BatchSize = 1, Height = 3, Width = 5, Channels = 1
3339 const armnn::TensorShape outputShape{ 1, 3, 5, 1 };
telsoa014fcda012018-03-09 14:13:49 +00003340
James Conroy074f3712018-10-03 09:32:03 +01003341 return ResizeBilinearMagTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00003342}
3343
3344LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workloadFactory)
3345{
3346 constexpr unsigned int width = 2;
3347 constexpr unsigned int height = 3;
3348
3349    const armnn::TensorInfo tensorInfo({ height, width }, armnn::DataType::Float32);
3351 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
3352 -10.0f, -5.0f,
3353 0.0f, 5.0f,
3354 10.0f, 10.0f
3355 }));
3356
3357 LayerTestResult<float, 2> ret(tensorInfo);
3358
3359    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
3360    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
3362
3363 armnn::FakeQuantizationQueueDescriptor data;
3364 armnn::WorkloadInfo info;
3365
3366 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
3367 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
3368 float min = -10.f;
3369 float max = 10.f;
3370
3371 data.m_Parameters.m_Min = min;
3372 data.m_Parameters.m_Max = max;
3373
3374 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
3375 armnn::FakeQuantizationQueueDescriptor refData = data;
3376 armnn::WorkloadInfo refInfo = info;
3377 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
3378
3379 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
3380
3381 inputHandle->Allocate();
3382 outputHandle->Allocate();
3383
3384 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
3385
surmeh013537c2c2018-05-18 16:31:43 +01003386 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00003387 workload->Execute();
3388
3389 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
3390
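    // The [min, max] = [-10, 10] float range maps onto the quantized range [0, 255]:
    // -10 maps to 0, +10 to 255, and intermediate values fall on (approximately)
    // proportional levels in between.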
3391 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
3392 0.0f, 63.0f,
3393 128.0f, 191.0f,
3394 255.0f, 255.0f
3395 }));
3396 return ret;
3397}
3398
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003399namespace
3400{
3401
3402LayerTestResult<float, 4> L2NormalizationTestImpl(armnn::IWorkloadFactory& workloadFactory,
3403 const armnn::TensorShape& inputOutputTensorShape,
3404 const std::vector<float>& inputValues,
3405 const std::vector<float>& expectedOutputValues,
3406 armnn::DataLayout dataLayout)
3407{
3408 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
3409 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
3410
3411 auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputValues));
3412
3413 LayerTestResult<float, 4> result(outputTensorInfo);
3414    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(expectedOutputValues));
3415
3416 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3417 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3418
3419 armnn::L2NormalizationQueueDescriptor descriptor;
3420 descriptor.m_Parameters.m_DataLayout = dataLayout;
3421 armnn::WorkloadInfo info;
3422
3423 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3424 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3425
3426 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
3427
3428 inputHandle->Allocate();
3429 outputHandle->Allocate();
3430
3431 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
3432
3433 workloadFactory.Finalize();
3434 workload->Execute();
3435
3436 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3437
3438 return result;
3439}
3440
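// Returns 1 / ||elements||_2. The L2 normalization tests below use it to build their
// expected outputs: each value is scaled by the inverse L2 norm taken across the
// channel dimension at the same spatial position.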
3441float CalcInvL2Norm(std::initializer_list<float> elements)
3442{
3443 const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
3444 [](float acc, float element) { return acc + element * element; });
3445 return 1.0f / sqrtf(reduction);
3446}
3447
3448} // anonymous namespace
3449
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003450template<typename T>
3451LayerTestResult<T, 2> Pad2dTestCommon(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003452{
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003453 const armnn::TensorShape inputShape{ 3, 3 };
3454 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003455
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003456 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
3457 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003458
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003459 std::vector<T> inputValues(
3460 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003461 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003462 // Height (3) x Width (3)
3463 4, 8, 6,
3464 7, 4, 4,
3465 3, 2, 4
3466 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003467
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003468 std::vector<T> expectedOutputValues(
3469 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003470 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003471 0, 0, 0, 0, 0, 0, 0,
3472 0, 0, 0, 0, 0, 0, 0,
3473 0, 0, 4, 8, 6, 0, 0,
3474 0, 0, 7, 4, 4, 0, 0,
3475 0, 0, 3, 2, 4, 0, 0,
3476 0, 0, 0, 0, 0, 0, 0,
3477 0, 0, 0, 0, 0, 0, 0
3478 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003479
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003480 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003481
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003482 LayerTestResult<T, 2> result(outputTensorInfo);
3483 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003484
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003485 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3486 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003487
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003488 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003489
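    // One (padBefore, padAfter) pair per dimension of the 3x3 input, all zero-filled:
    // height 2 + 3 + 2 = 7 and width 2 + 3 + 2 = 7, matching outputShape above.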
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003490 std::vector<std::pair<unsigned int, unsigned int>> PadList;
3491    PadList.push_back(std::pair<unsigned int, unsigned int>(2, 2));
3492    PadList.push_back(std::pair<unsigned int, unsigned int>(2, 2));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003493
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003494 descriptor.m_Parameters.m_PadList = PadList;
3495 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003496
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003497 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3498 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003499
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003500 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003501
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003502 inputHandle->Allocate();
3503 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003504
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003505 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003506
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003507 workloadFactory.Finalize();
3508 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003509
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003510 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003511
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003512 return result;
3513}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003514
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003515template <typename T>
3516LayerTestResult<T, 3> Pad3dTestCommon(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003517{
3518 const armnn::TensorShape inputShape{ 2, 2, 2 };
3519 const armnn::TensorShape outputShape{ 3, 5, 6 };
3520
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003521 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
3522 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003523
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003524 std::vector<T> inputValues(
3525        QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003526 {
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003527 // Channel 0, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003528 0, 4,
3529 2, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003530
3531 // Channel 1, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003532 6, 1,
3533 5, 2
3534 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003535
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003536 std::vector<T> expectedOutputValues(
3537        QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003538 {
3539
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003540 0, 0, 0, 0, 0, 0,
3541 0, 0, 0, 0, 0, 0,
3542 0, 0, 0, 4, 0, 0,
3543 0, 0, 2, 5, 0, 0,
3544 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003545
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003546 0, 0, 0, 0, 0, 0,
3547 0, 0, 0, 0, 0, 0,
3548 0, 0, 6, 1, 0, 0,
3549 0, 0, 5, 2, 0, 0,
3550 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003551
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003552 0, 0, 0, 0, 0, 0,
3553 0, 0, 0, 0, 0, 0,
3554 0, 0, 0, 0, 0, 0,
3555 0, 0, 0, 0, 0, 0,
3556 0, 0, 0, 0, 0, 0
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003557
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003558 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003559
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003560 auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003561
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003562 LayerTestResult<T, 3> result(outputTensorInfo);
3563 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003564
3565 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3566 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3567
3568 armnn::PadQueueDescriptor descriptor;
3569
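    // (padBefore, padAfter) per dimension of the 2x2x2 input:
    // channels 0 + 2 + 1 = 3, height 2 + 2 + 1 = 5, width 2 + 2 + 2 = 6.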
3570 std::vector<std::pair<unsigned int, unsigned int>> PadList;
3571    PadList.push_back(std::pair<unsigned int, unsigned int>(0, 1));
3572    PadList.push_back(std::pair<unsigned int, unsigned int>(2, 1));
3573    PadList.push_back(std::pair<unsigned int, unsigned int>(2, 2));
3574
3575 descriptor.m_Parameters.m_PadList = PadList;
3576 armnn::WorkloadInfo info;
3577
3578 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3579 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3580
3581 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
3582
3583 inputHandle->Allocate();
3584 outputHandle->Allocate();
3585
3586 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
3587
3588 workloadFactory.Finalize();
3589 workload->Execute();
3590
3591 CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
3592
3593 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003594}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003595
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003596template <typename T>
3597LayerTestResult<T, 4> Pad4dTestCommon(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003598{
3599 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
3600 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
3601
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003602 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
3603 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003604
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003605 std::vector<T> inputValues(
3606        QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003607 {
3608 // Batch 0, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003609 0, 1,
3610 2, 3,
3611 4, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003612
3613 // Batch 0, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003614 6, 7,
3615 8, 9,
3616 10, 11,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003617
3618 // Batch 1, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003619 12, 13,
3620 14, 15,
3621 16, 17,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003622
3623 // Batch 1, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003624 18, 19,
3625 20, 21,
3626 22, 23
3627 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003628
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003629 std::vector<T> expectedOutputValues(
3630        QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003631 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003632 0, 0, 0, 0,
3633 0, 0, 0, 0,
3634 0, 0, 0, 0,
3635 0, 0, 0, 0,
3636 0, 0, 0, 0,
3637 0, 0, 0, 0,
3638 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003639
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003640 0, 0, 0, 0,
3641 0, 0, 0, 0,
3642 0, 0, 0, 0,
3643 0, 0, 0, 0,
3644 0, 0, 0, 0,
3645 0, 0, 0, 0,
3646 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003647
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003648 0, 0, 0, 0,
3649 0, 0, 0, 0,
3650 0, 0, 0, 0,
3651 0, 0, 0, 0,
3652 0, 0, 0, 0,
3653 0, 0, 0, 0,
3654 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003655
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003656 0, 0, 0, 0,
3657 0, 0, 0, 0,
3658 0, 0, 0, 0,
3659 0, 0, 0, 0,
3660 0, 0, 0, 0,
3661 0, 0, 0, 0,
3662 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003663
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003664 0, 0, 0, 0,
3665 0, 0, 0, 0,
3666 0, 0, 0, 0,
3667 0, 0, 0, 0,
3668 0, 0, 0, 0,
3669 0, 0, 0, 0,
3670 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003671
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003672 0, 0, 0, 0,
3673 0, 0, 0, 0,
3674 0, 0, 0, 0,
3675 0, 0, 0, 0,
3676 0, 0, 0, 0,
3677 0, 0, 0, 0,
3678 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003679
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003680 0, 0, 0, 0,
3681 0, 0, 0, 0,
3682 0, 0, 0, 0,
3683 0, 0, 0, 0,
3684 0, 0, 0, 0,
3685 0, 0, 0, 0,
3686 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003687
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003688 0, 0, 0, 0,
3689 0, 0, 0, 0,
3690 0, 0, 0, 0,
3691 0, 0, 1, 0,
3692 0, 2, 3, 0,
3693 0, 4, 5, 0,
3694 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003695
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003696 0, 0, 0, 0,
3697 0, 0, 0, 0,
3698 0, 0, 0, 0,
3699 0, 6, 7, 0,
3700 0, 8, 9, 0,
3701 0, 10, 11, 0,
3702 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003703
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003704 0, 0, 0, 0,
3705 0, 0, 0, 0,
3706 0, 0, 0, 0,
3707 0, 0, 0, 0,
3708 0, 0, 0, 0,
3709 0, 0, 0, 0,
3710 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003711
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003712 0, 0, 0, 0,
3713 0, 0, 0, 0,
3714 0, 0, 0, 0,
3715 0, 0, 0, 0,
3716 0, 0, 0, 0,
3717 0, 0, 0, 0,
3718 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003719
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003720 0, 0, 0, 0,
3721 0, 0, 0, 0,
3722 0, 0, 0, 0,
3723 0, 0, 0, 0,
3724 0, 0, 0, 0,
3725 0, 0, 0, 0,
3726 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003727
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003728 0, 0, 0, 0,
3729 0, 0, 0, 0,
3730 0, 0, 0, 0,
3731 0, 12, 13, 0,
3732 0, 14, 15, 0,
3733 0, 16, 17, 0,
3734 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003735
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003736 0, 0, 0, 0,
3737 0, 0, 0, 0,
3738 0, 0, 0, 0,
3739 0, 18, 19, 0,
3740 0, 20, 21, 0,
3741 0, 22, 23, 0,
3742 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003743
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003744 0, 0, 0, 0,
3745 0, 0, 0, 0,
3746 0, 0, 0, 0,
3747 0, 0, 0, 0,
3748 0, 0, 0, 0,
3749 0, 0, 0, 0,
3750 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003751
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003752 0, 0, 0, 0,
3753 0, 0, 0, 0,
3754 0, 0, 0, 0,
3755 0, 0, 0, 0,
3756 0, 0, 0, 0,
3757 0, 0, 0, 0,
3758 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003759
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003760 0, 0, 0, 0,
3761 0, 0, 0, 0,
3762 0, 0, 0, 0,
3763 0, 0, 0, 0,
3764 0, 0, 0, 0,
3765 0, 0, 0, 0,
3766 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003767
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003768 0, 0, 0, 0,
3769 0, 0, 0, 0,
3770 0, 0, 0, 0,
3771 0, 0, 0, 0,
3772 0, 0, 0, 0,
3773 0, 0, 0, 0,
3774 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003775
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003776 0, 0, 0, 0,
3777 0, 0, 0, 0,
3778 0, 0, 0, 0,
3779 0, 0, 0, 0,
3780 0, 0, 0, 0,
3781 0, 0, 0, 0,
3782 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003783
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003784 0, 0, 0, 0,
3785 0, 0, 0, 0,
3786 0, 0, 0, 0,
3787 0, 0, 0, 0,
3788 0, 0, 0, 0,
3789 0, 0, 0, 0,
3790 0, 0, 0, 0
3791 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003792
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003793 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003794
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003795 LayerTestResult<T, 4> result(outputTensorInfo);
3796 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003797
3798 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3799 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3800
3801 armnn::PadQueueDescriptor descriptor;
3802
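    // (padBefore, padAfter) per dimension of the 2x2x3x2 input:
    // batch 1 + 2 + 1 = 4, channels 2 + 2 + 1 = 5, height 3 + 3 + 1 = 7, width 1 + 2 + 1 = 4.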
3803 std::vector<std::pair<unsigned int, unsigned int>> PadList;
3804    PadList.push_back(std::pair<unsigned int, unsigned int>(1, 1));
3805    PadList.push_back(std::pair<unsigned int, unsigned int>(2, 1));
3806    PadList.push_back(std::pair<unsigned int, unsigned int>(3, 1));
3807    PadList.push_back(std::pair<unsigned int, unsigned int>(1, 1));
3808
3809 descriptor.m_Parameters.m_PadList = PadList;
3810 armnn::WorkloadInfo info;
3811
3812 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3813 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3814
3815 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
3816
3817 inputHandle->Allocate();
3818 outputHandle->Allocate();
3819
3820 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
3821
3822 workloadFactory.Finalize();
3823
3824 workload->Execute();
3825
3826 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3827
3828 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01003829}
3830
3831LayerTestResult<uint8_t, 2> PadUint82dTest(armnn::IWorkloadFactory& workloadFactory)
3832{
3833 return Pad2dTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
3834}
3835
3836LayerTestResult<uint8_t, 3> PadUint83dTest(armnn::IWorkloadFactory& workloadFactory)
3837{
3838 return Pad3dTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
3839}
3840
3841LayerTestResult<uint8_t, 4> PadUint84dTest(armnn::IWorkloadFactory& workloadFactory)
3842{
3843 return Pad4dTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
3844}
3845
3846LayerTestResult<float, 2> PadFloat322dTest(armnn::IWorkloadFactory& workloadFactory)
3847{
3848 return Pad2dTestCommon<float>(workloadFactory, 0.0f, 0);
3849}
3850
3851LayerTestResult<float, 3> PadFloat323dTest(armnn::IWorkloadFactory& workloadFactory)
3852{
3853 return Pad3dTestCommon<float>(workloadFactory, 0.0f, 0);
3854}
3855
3856LayerTestResult<float, 4> PadFloat324dTest(armnn::IWorkloadFactory& workloadFactory)
3857{
3858 return Pad4dTestCommon<float>(workloadFactory, 0.0f, 0);
3859}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01003860
telsoa014fcda012018-03-09 14:13:49 +00003861LayerTestResult<float, 4> L2Normalization1dTest(armnn::IWorkloadFactory& workloadFactory)
3862{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003863 // Width: 1
3864 // Height: 1
3865 // Channels: 10
3866 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00003867
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003868 const armnn::TensorShape inputOutputShape{ 1, 10, 1, 1 };
3869 std::vector<float> inputValues
3870 {
3871 // Batch 0, Channel 0, Height (1) x Width (1)
3872 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00003873
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003874 // Batch 0, Channel 1, Height (1) x Width (1)
3875 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00003876
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003877 // Batch 0, Channel 2, Height (1) x Width (1)
3878 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00003879
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003880 // Batch 0, Channel 3, Height (1) x Width (1)
3881 4.0f,
3882
3883 // Batch 0, Channel 4, Height (1) x Width (1)
3884 5.0f,
3885
3886 // Batch 0, Channel 5, Height (1) x Width (1)
3887 6.0f,
3888
3889 // Batch 0, Channel 6, Height (1) x Width (1)
3890 7.0f,
3891
3892 // Batch 0, Channel 7, Height (1) x Width (1)
3893 8.0f,
3894
3895 // Batch 0, Channel 8, Height (1) x Width (1)
3896 9.0f,
3897
3898 // Batch 0, Channel 9, Height (1) x Width (1)
3899 10.0f
3900 };
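    // 1 / sqrt(1^2 + 2^2 + ... + 10^2) = 1 / sqrt(385) ~= 0.050964719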
telsoa014fcda012018-03-09 14:13:49 +00003901 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003902 std::vector<float> expectedOutputValues
3903 {
3904 // Batch 0, Channel 0, Height (1) x Width (1)
telsoa014fcda012018-03-09 14:13:49 +00003905 1.0f * approxInvL2Norm,
3906 2.0f * approxInvL2Norm,
3907 3.0f * approxInvL2Norm,
3908 4.0f * approxInvL2Norm,
3909 5.0f * approxInvL2Norm,
3910 6.0f * approxInvL2Norm,
3911 7.0f * approxInvL2Norm,
3912 8.0f * approxInvL2Norm,
3913 9.0f * approxInvL2Norm,
3914 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003915 };
telsoa014fcda012018-03-09 14:13:49 +00003916
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003917 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
3918 inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
telsoa014fcda012018-03-09 14:13:49 +00003919}
3920
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003921LayerTestResult<float, 4> L2Normalization1dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00003922{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003923 // Width: 1
3924 // Height: 1
3925 // Channels: 10
3926 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00003927
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003928 const armnn::TensorShape inputOutputShape{ 1, 1, 1, 10 };
3929 std::vector<float> inputValues
3930 {
3931 // Batch 0, Height 0, Width (1) x Channel (10)
3932 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f
3933 };
3934 const float approxInvL2Norm = 0.050964719f;
3935 std::vector<float> expectedOutputValues
3936 {
3937 // Batch 0, Height 0, Width (1) x Channel (10)
3938 1.0f * approxInvL2Norm,
3939 2.0f * approxInvL2Norm,
3940 3.0f * approxInvL2Norm,
3941 4.0f * approxInvL2Norm,
3942 5.0f * approxInvL2Norm,
3943 6.0f * approxInvL2Norm,
3944 7.0f * approxInvL2Norm,
3945 8.0f * approxInvL2Norm,
3946 9.0f * approxInvL2Norm,
3947 10.0f * approxInvL2Norm
3948 };
3949
3950 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
3951 inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00003952}
3953
3954LayerTestResult<float, 4> L2Normalization2dTest(armnn::IWorkloadFactory& workloadFactory)
3955{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003956 // Width: 5
3957 // Height: 1
3958 // Channels: 2
3959 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00003960
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003961 const armnn::TensorShape inputOutputShape{ 1, 2, 1, 5 };
3962 std::vector<float> inputValues
3963 {
3964 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00003965 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00003966
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003967 // Batch 0, Channel 1, Height (1) x Width (5)
3968 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
3969 };
3970 std::vector<float> expectedOutputValues
3971 {
3972 // Batch 0, Channel 0, Height (1) x Width (5)
3973 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
3974 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
3975 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
3976 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00003977 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
3978
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003979 // Batch 0, Channel 1, Height (1) x Width (5)
3980 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
3981 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
3982 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
3983 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00003984 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003985 };
telsoa014fcda012018-03-09 14:13:49 +00003986
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003987 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
3988 inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
3989}
telsoa014fcda012018-03-09 14:13:49 +00003990
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003991LayerTestResult<float, 4> L2Normalization2dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
3992{
3993 // Width: 5
3994 // Height: 1
3995 // Channels: 2
3996 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00003997
Matteo Martincigh539b44d2018-10-01 09:26:39 +01003998 const armnn::TensorShape inputOutputShape{ 1, 1, 5, 2 };
3999 std::vector<float> inputValues
4000 {
4001 // Batch 0, Height 0, Width (5) x Channel (2)
4002 1.0f, 2.0f,
4003 3.0f, 4.0f,
4004 5.0f, 6.0f,
4005 7.0f, 8.0f,
4006 9.0f, 10.0f
4007 };
4008 std::vector<float> expectedOutputValues
4009 {
4010 // Batch 0, Height 0, Width (5) x Channel (2)
4011 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
4012 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
4013 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
4014 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
4015 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
4016 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
4017 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
4018 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
4019 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
4020 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
4021 };
telsoa014fcda012018-03-09 14:13:49 +00004022
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004023 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4024 inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00004025}
4026
4027LayerTestResult<float, 4> L2Normalization3dTest(armnn::IWorkloadFactory& workloadFactory)
4028{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004029 // Width: 3
4030 // Height: 4
4031 // Channels: 2
4032 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00004033
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004034 const armnn::TensorShape inputOutputShape{ 1, 2, 4, 3 };
4035 std::vector<float> inputValues
4036 {
4037 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004038 119.0f, 21.0f, 150.0f,
4039 149.0f, 32.0f, 179.0f,
4040 15.0f, 227.0f, 141.0f,
4041 147.0f, 199.0f, 220.0f,
4042
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004043 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004044 110.0f, 140.0f, 73.0f,
4045 211.0f, 212.0f, 89.0f,
4046 24.0f, 138.0f, 188.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004047 162.0f, 12.0f, 161.0f
4048 };
4049 std::vector<float> expectedOutputValues
4050 {
4051 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004052 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4053 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4054 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4055 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4056 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4057 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4058 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4059 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4060 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4061 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4062 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
4063 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
4064
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004065 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004066 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4067 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4068 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4069 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4070 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4071 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4072 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4073 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4074 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4075 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4076 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004077 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
4078 };
telsoa014fcda012018-03-09 14:13:49 +00004079
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004080 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4081 inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
4082}
telsoa014fcda012018-03-09 14:13:49 +00004083
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004084LayerTestResult<float, 4> L2Normalization3dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
4085{
4086 // Width: 3
4087 // Height: 4
4088 // Channels: 2
4089 // BatchSize: 1
telsoa014fcda012018-03-09 14:13:49 +00004090
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004091 const armnn::TensorShape inputOutputShape{ 1, 4, 3, 2 };
4092 std::vector<float> inputValues
4093 {
4094 // Batch 0, Height 0, Width (3) x Channel (2)
4095 119.0f, 110.0f,
4096 21.0f, 140.0f,
4097 150.0f, 73.0f,
telsoa014fcda012018-03-09 14:13:49 +00004098
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004099 // Batch 0, Height 1, Width (3) x Channel (2)
4100 149.0f, 211.0f,
4101 32.0f, 212.0f,
4102 179.0f, 89.0f,
telsoa014fcda012018-03-09 14:13:49 +00004103
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004104 // Batch 0, Height 2, Width (3) x Channel (2)
4105 15.0f, 24.0f,
4106 227.0f, 138.0f,
4107 141.0f, 188.0f,
telsoa014fcda012018-03-09 14:13:49 +00004108
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004109 // Batch 0, Height 3, Width (3) x Channel (2)
4110 147.0f, 162.0f,
4111 199.0f, 12.0f,
4112 220.0f, 161.0f
4113 };
4114 std::vector<float> expectedOutputValues
4115 {
4116 // Batch 0, Height 0, Width (3) x Channel (2)
4117 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4118 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
4119 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4120 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
4121 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4122 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
4123
4124 // Batch 0, Height 1, Width (3) x Channel (2)
4125 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4126 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
4127 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4128 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
4129 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4130 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
4131
4132 // Batch 0, Height 2, Width (3) x Channel (2)
4133 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4134 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
4135 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4136 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
4137 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4138 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
4139
4140 // Batch 0, Height 3, Width (3) x Channel (2)
4141 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4142 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
4143 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
4144 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
4145 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
4146 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
4147 };
4148
4149 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4150 inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00004151}
4152
4153LayerTestResult<float, 4> L2Normalization4dTest(armnn::IWorkloadFactory& workloadFactory)
4154{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004155 // Width: 3
4156 // Height: 4
4157 // Channels: 3
4158 // BatchSize: 2
telsoa014fcda012018-03-09 14:13:49 +00004159
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004160 const armnn::TensorShape inputOutputShape{ 2, 3, 4, 3 };
4161 std::vector<float> inputValues
4162 {
4163 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004164 235.0f, 46.0f, 178.0f,
4165 100.0f, 123.0f, 19.0f,
4166 172.0f, 74.0f, 250.0f,
4167 6.0f, 195.0f, 80.0f,
4168
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004169 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004170 113.0f, 95.0f, 202.0f,
4171 77.0f, 114.0f, 71.0f,
4172 122.0f, 246.0f, 166.0f,
4173 82.0f, 28.0f, 37.0f,
4174
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004175 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004176 56.0f, 170.0f, 162.0f,
4177 194.0f, 89.0f, 254.0f,
4178 12.0f, 209.0f, 200.0f,
4179 1.0f, 64.0f, 54.0f,
4180
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004181 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004182 67.0f, 90.0f, 49.0f,
4183 7.0f, 163.0f, 18.0f,
4184 25.0f, 117.0f, 103.0f,
4185 247.0f, 59.0f, 189.0f,
4186
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004187 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004188 239.0f, 104.0f, 199.0f,
4189 17.0f, 124.0f, 153.0f,
4190 222.0f, 217.0f, 75.0f,
4191 32.0f, 126.0f, 21.0f,
4192
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004193 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004194 97.0f, 145.0f, 215.0f,
4195 115.0f, 116.0f, 238.0f,
4196 226.0f, 16.0f, 132.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004197 92.0f, 125.0f, 88.0f
4198 };
4199 std::vector<float> expectedOutputValues
4200 {
4201 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004202 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4203 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4204        178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
4205 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4206 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4207 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4208 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4209 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4210 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4211 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4212 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4213 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4214
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004215 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004216 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4217 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4218        202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
4219 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4220 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4221 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4222 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4223 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4224 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4225 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4226 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4227 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4228
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004229 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004230 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4231 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4232        162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
4233 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4234 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4235 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4236 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4237 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4238 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4239 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4240 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4241 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4242
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004243 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004244 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4245 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4246 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4247 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4248 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4249 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4250 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4251 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4252 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4253 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4254 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4255 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
4256
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004257 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004258 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4259 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4260 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4261 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4262 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4263 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4264 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4265 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4266 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4267 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4268 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4269 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
4270
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004271 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00004272 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4273 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4274 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4275 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4276 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4277 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4278 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4279 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4280 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4281 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4282 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004283 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
4284 };
telsoa014fcda012018-03-09 14:13:49 +00004285
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004286 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4287 inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
4288}
telsoa014fcda012018-03-09 14:13:49 +00004289
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004290LayerTestResult<float, 4> L2Normalization4dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
4291{
4292 // Width: 3
4293 // Height: 4
4294 // Channels: 3
4295 // BatchSize: 2
telsoa014fcda012018-03-09 14:13:49 +00004296
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004297 const armnn::TensorShape inputOutputShape{ 2, 4, 3, 3 };
4298 std::vector<float> inputValues
4299 {
4300 // Batch 0, Height 0, Width (3) x Channel (3)
4301 235.0f, 113.0f, 56.0f,
4302 46.0f, 95.0f, 170.0f,
4303 178.0f, 202.0f, 162.0f,
telsoa014fcda012018-03-09 14:13:49 +00004304
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004305 // Batch 0, Height 1, Width (3) x Channel (3)
4306 100.0f, 77.0f, 194.0f,
4307 123.0f, 114.0f, 89.0f,
4308 19.0f, 71.0f, 254.0f,
telsoa014fcda012018-03-09 14:13:49 +00004309
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004310 // Batch 0, Height 2, Width (3) x Channel (3)
4311 172.0f, 122.0f, 12.0f,
4312 74.0f, 246.0f, 209.0f,
4313 250.0f, 166.0f, 200.0f,
telsoa014fcda012018-03-09 14:13:49 +00004314
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004315 // Batch 0, Height 3, Width (3) x Channel (3)
4316 6.0f, 82.0f, 1.0f,
4317 195.0f, 28.0f, 64.0f,
4318 80.0f, 37.0f, 54.0f,
4319
4320 // Batch 1, Height 0, Width (3) x Channel (3)
4321 67.0f, 239.0f, 97.0f,
4322 90.0f, 104.0f, 145.0f,
4323 49.0f, 199.0f, 215.0f,
4324
4325 // Batch 1, Height 1, Width (3) x Channel (3)
4326 7.0f, 17.0f, 115.0f,
4327 163.0f, 124.0f, 116.0f,
4328 18.0f, 153.0f, 238.0f,
4329
4330 // Batch 1, Height 2, Width (3) x Channel (3)
4331 25.0f, 222.0f, 226.0f,
4332 117.0f, 217.0f, 16.0f,
4333 103.0f, 75.0f, 132.0f,
4334
4335 // Batch 1, Height 3, Width (3) x Channel (3)
4336 247.0f, 32.0f, 92.0f,
4337 59.0f, 126.0f, 125.0f,
4338 189.0f, 21.0f, 88.0f
4339 };
4340 std::vector<float> expectedOutputValues
4341 {
4342 // Batch 0, Height 0, Width (3) x Channel (3)
4343 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4344 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4345 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
4346 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4347 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4348 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
4349        178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
4350        202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
4351        162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
4352
4353 // Batch 0, Height 1, Width (3) x Channel (3)
4354 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4355 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4356 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
4357 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4358 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4359 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
4360 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4361 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4362 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
4363
4364 // Batch 0, Height 2, Width (3) x Channel (3)
4365 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4366 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4367 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
4368 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4369 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4370 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
4371 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4372 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4373 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
4374
4375 // Batch 0, Height 3, Width (3) x Channel (3)
4376 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4377 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4378 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
4379 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4380 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4381 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
4382 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4383 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4384 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
4385
4386 // Batch 1, Height 0, Width (3) x Channel (3)
4387 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4388 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4389 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
4390 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4391 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4392 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
4393 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4394 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4395 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
4396
4397 // Batch 1, Height 1, Width (3) x Channel (3)
4398 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4399 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4400 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
4401 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4402 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4403 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
4404 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4405 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4406 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
4407
4408 // Batch 1, Height 2, Width (3) x Channel (3)
4409 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4410 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4411 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
4412 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4413 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4414 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
4415 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4416 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4417 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
4418
4419 // Batch 1, Height 3, Width (3) x Channel (3)
4420 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4421 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4422 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
4423 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4424 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4425 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
4426 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
4427 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
4428 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
4429 };
4430
4431 return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
4432 inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00004433}
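// Worked example for the first NHWC pixel above, assuming the CalcInvL2Norm behaviour
// sketched earlier: the channel values at (batch 0, height 0, width 0) are
// { 235, 113, 56 }, so the sum of squares is 55225 + 12769 + 3136 = 71130, the L2 norm
// is sqrt(71130) ~= 266.70, and the first expected output is 235 / 266.70 ~= 0.881.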
4434
4435template <typename T>
4436LayerTestResult<T, 4> ConstantTestImpl(armnn::IWorkloadFactory& workloadFactory,
4437 float qScale,
4438 int32_t qOffset)
4439{
4440 constexpr unsigned int inputWidth = 3;
4441 constexpr unsigned int inputHeight = 4;
4442 constexpr unsigned int inputChannels = 3;
4443 constexpr unsigned int inputBatchSize = 2;
4444
4445 constexpr unsigned int outputWidth = inputWidth;
4446 constexpr unsigned int outputHeight = inputHeight;
4447 constexpr unsigned int outputChannels = inputChannels;
4448 constexpr unsigned int outputBatchSize = inputBatchSize;
4449
4450 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
4451 armnn::GetDataType<T>());
4452
4453 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
4454 armnn::GetDataType<T>());
4455
4456 // Set quantization parameters if the requested type is a quantized type.
4457 if(armnn::IsQuantizedType<T>())
4458 {
4459 inputTensorInfo.SetQuantizationScale(qScale);
4460 inputTensorInfo.SetQuantizationOffset(qOffset);
4461 outputTensorInfo.SetQuantizationScale(qScale);
4462 outputTensorInfo.SetQuantizationOffset(qOffset);
4463 }
4464
4465 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
4466 QuantizedVector<T>(qScale, qOffset, {
4467 // Batch 0, Channel 0
4468 235.0f, 46.0f, 178.0f,
4469 100.0f, 123.0f, 19.0f,
4470 172.0f, 74.0f, 250.0f,
4471 6.0f, 195.0f, 80.0f,
4472
4473 // Batch 0, Channel 1
4474 113.0f, 95.0f, 202.0f,
4475 77.0f, 114.0f, 71.0f,
4476 122.0f, 246.0f, 166.0f,
4477 82.0f, 28.0f, 37.0f,
4478
4479 // Batch 0, Channel 2
4480 56.0f, 170.0f, 162.0f,
4481 194.0f, 89.0f, 254.0f,
4482 12.0f, 209.0f, 200.0f,
4483 1.0f, 64.0f, 54.0f,
4484
4485 // Batch 1, Channel 0
4486 67.0f, 90.0f, 49.0f,
4487 7.0f, 163.0f, 18.0f,
4488 25.0f, 117.0f, 103.0f,
4489 247.0f, 59.0f, 189.0f,
4490
4491 // Batch 1, Channel 1
4492 239.0f, 104.0f, 199.0f,
4493 17.0f, 124.0f, 153.0f,
4494 222.0f, 217.0f, 75.0f,
4495 32.0f, 126.0f, 21.0f,
4496
4497 // Batch 1, Channel 2
4498 97.0f, 145.0f, 215.0f,
4499 115.0f, 116.0f, 238.0f,
4500 226.0f, 16.0f, 132.0f,
4501 92.0f, 125.0f, 88.0f,
4502 })));
4503
4504 LayerTestResult<T, 4> result(outputTensorInfo);
4505 result.outputExpected = input;
4506
4507 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4508
4509 armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
4510 AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
4511
4512 armnn::ConstantQueueDescriptor descriptor;
4513 descriptor.m_LayerOutput = &constantTensor;
4514
4515 armnn::WorkloadInfo info;
4516 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4517
4518 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
4519
4520 outputHandle->Allocate();
4521
surmeh013537c2c2018-05-18 16:31:43 +01004522 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00004523 workload->Execute();
4524
4525 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4526 return result;
4527}
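// Note on QuantizedVector: for quantized T it is assumed to map each real value v to
// round(v / qScale) + qOffset, clamped to the range of T, and to pass float data through
// unchanged. A hedged sketch of that mapping for uint8_t (the rounding mode is an
// assumption; armnn's own quantization helpers are authoritative):
//
//     uint8_t Quantize(float v, float scale, int32_t offset)
//     {
//         const int32_t q = static_cast<int32_t>(v / scale + 0.5f) + offset; // round half up
//         return static_cast<uint8_t>(std::min(255, std::max(0, q)));
//     }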
4528
4529LayerTestResult<float, 4> ConstantTest(armnn::IWorkloadFactory& workloadFactory)
4530{
4531 return ConstantTestImpl<float>(workloadFactory, 0.0f, 0);
4532}
4533
4534LayerTestResult<uint8_t, 4> ConstantTestUint8(armnn::IWorkloadFactory& workloadFactory)
4535{
4536 return ConstantTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
4537}
4538
4539LayerTestResult<uint8_t, 3> MergerUint8Test(armnn::IWorkloadFactory& workloadFactory)
4540{
surmeh013537c2c2018-05-18 16:31:43 +01004541 unsigned int outputWidth = 3;
telsoa014fcda012018-03-09 14:13:49 +00004542 unsigned int outputHeight = 6;
4543 unsigned int outputChannels = 3;
4544
surmeh013537c2c2018-05-18 16:31:43 +01004545 unsigned int inputWidth1 = 3;
4546 unsigned int inputHeight1 = 6;
4547 unsigned int inputChannels1 = 2;
telsoa014fcda012018-03-09 14:13:49 +00004548
surmeh013537c2c2018-05-18 16:31:43 +01004549 unsigned int inputWidth2 = 3;
4550 unsigned int inputHeight2 = 6;
4551 unsigned int inputChannels2 = 1;
telsoa014fcda012018-03-09 14:13:49 +00004552
telsoa01c577f2c2018-08-31 09:22:23 +01004553 // Defines the tensor descriptors.
telsoa014fcda012018-03-09 14:13:49 +00004554 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
4555 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
4556 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
telsoa014fcda012018-03-09 14:13:49 +00004557
telsoa01c577f2c2018-08-31 09:22:23 +01004558    // Arbitrary scale and offset; they don't matter here because the merger operator neither dequantizes nor requantizes the data.
telsoa014fcda012018-03-09 14:13:49 +00004559 const float scale = 0.13497836f;
4560 const int32_t offset = -7;
4561
4562 outputTensorInfo.SetQuantizationScale(scale);
4563 outputTensorInfo.SetQuantizationOffset(offset);
4564 inputTensorInfo1.SetQuantizationScale(scale);
4565 inputTensorInfo1.SetQuantizationOffset(offset);
4566 inputTensorInfo2.SetQuantizationScale(scale);
4567 inputTensorInfo2.SetQuantizationOffset(offset);
telsoa014fcda012018-03-09 14:13:49 +00004568
4569 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
4570
4571 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
surmeh013537c2c2018-05-18 16:31:43 +01004572 {
4573 1, 2, 3,
4574 4, 5, 6,
4575 7, 8, 9,
4576 10, 11, 12,
4577 13, 14, 15,
4578 16, 17, 18,
telsoa014fcda012018-03-09 14:13:49 +00004579
surmeh013537c2c2018-05-18 16:31:43 +01004580 19, 20, 21,
4581 22, 23, 24,
4582 25, 26, 27,
4583 28, 29, 30,
4584 31, 32, 33,
4585 34, 35, 36,
telsoa014fcda012018-03-09 14:13:49 +00004586
surmeh013537c2c2018-05-18 16:31:43 +01004587 37, 38, 39,
4588 40, 41, 42,
4589 43, 44, 45,
4590 46, 47, 48,
4591 49, 50, 51,
4592 52, 53, 54,
4593 })
telsoa014fcda012018-03-09 14:13:49 +00004594 );
4595
telsoa014fcda012018-03-09 14:13:49 +00004596 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
4597 {
surmeh013537c2c2018-05-18 16:31:43 +01004598 1, 2, 3,
4599 4, 5, 6,
4600 7, 8, 9,
4601 10, 11, 12,
4602 13, 14, 15,
4603 16, 17, 18,
telsoa014fcda012018-03-09 14:13:49 +00004604
surmeh013537c2c2018-05-18 16:31:43 +01004605 19, 20, 21,
4606 22, 23, 24,
4607 25, 26, 27,
4608 28, 29, 30,
4609 31, 32, 33,
4610 34, 35, 36,
telsoa014fcda012018-03-09 14:13:49 +00004611 })
4612 );
4613
4614 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
4615 {
surmeh013537c2c2018-05-18 16:31:43 +01004616 37, 38, 39,
4617 40, 41, 42,
telsoa014fcda012018-03-09 14:13:49 +00004618 43, 44, 45,
surmeh013537c2c2018-05-18 16:31:43 +01004619 46, 47, 48,
4620 49, 50, 51,
4621 52, 53, 54,
telsoa014fcda012018-03-09 14:13:49 +00004622 })
4623 );
4624
telsoa01c577f2c2018-08-31 09:22:23 +01004625    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; // Origin of the first window; its extent is defined by the size of input[0].
telsoa014fcda012018-03-09 14:13:49 +00004626 armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);
4627
telsoa01c577f2c2018-08-31 09:22:23 +01004628    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; // Origin of the second window; its extent is defined by the size of input[1].
telsoa014fcda012018-03-09 14:13:49 +00004629 armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
4630
telsoa014fcda012018-03-09 14:13:49 +00004631
4632 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4633
4634 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
4635
4636 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
4637 subTensorsSupported ?
4638 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
4639 workloadFactory.CreateTensorHandle(inputTensorInfo1);
4640
4641 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
4642 subTensorsSupported ?
4643 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
4644 workloadFactory.CreateTensorHandle(inputTensorInfo2);
4645
telsoa014fcda012018-03-09 14:13:49 +00004646
4647 armnn::MergerQueueDescriptor data;
4648 armnn::WorkloadInfo info;
4649 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
4650 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
telsoa014fcda012018-03-09 14:13:49 +00004651 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
4652
4653 data.m_ViewOrigins.push_back(window1);
4654 data.m_ViewOrigins.push_back(window2);
telsoa014fcda012018-03-09 14:13:49 +00004655
4656 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);
4657
4658 inputHandle1->Allocate();
4659 inputHandle2->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00004660 outputHandle->Allocate();
4661
4662 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
4663 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00004664
surmeh013537c2c2018-05-18 16:31:43 +01004665 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00004666 workload->Execute();
4667
4668 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
4669
4670 return ret;
4671}
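// In the test above the two inputs are concatenated along the channel axis: window1's
// origin { 0, 0, 0 } places input1 in output channels 0-1 and window2's origin
// { 2, 0, 0 } places input2 in output channel 2, i.e. conceptually
//
//     output[c][h][w] = (c < 2) ? input1[c][h][w] : input2[c - 2][h][w];
//
// which is why the expected output is simply the two input blocks back to back.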
4672
4673LayerTestResult<uint8_t, 4> AdditionUint8Test(armnn::IWorkloadFactory& workloadFactory)
4674{
4675 unsigned int batchSize = 1;
4676 unsigned int channels = 2;
4677 unsigned int height = 2;
4678 unsigned int width = 3;
4679
4680 const float scale = 7.0f;
4681 const int32_t offset = 3;
4682
4683 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
4684 armnn::TensorInfo outputTensorInfo;
4685
4686 const unsigned int shape[] = { batchSize, channels, height, width };
4687 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
4688 inputTensorInfo1.SetQuantizationScale(scale);
4689 inputTensorInfo1.SetQuantizationOffset(offset);
4690
4691 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
4692 inputTensorInfo2.SetQuantizationScale(scale);
4693 inputTensorInfo2.SetQuantizationOffset(offset);
4694
4695 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
4696 outputTensorInfo.SetQuantizationScale(scale);
4697 outputTensorInfo.SetQuantizationOffset(offset);
4698
telsoa01c577f2c2018-08-31 09:22:23 +01004699 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00004700 auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
4701 {
4702 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
4703 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
4704 }));
4705
telsoa01c577f2c2018-08-31 09:22:23 +01004706 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00004707 auto input2 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
4708 {
4709 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
4710 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
4711 }));
4712
telsoa01c577f2c2018-08-31 09:22:23 +01004713 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00004714 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
4715 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>(
4716 {
4717 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
4718 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
4719 }));
4720
4721 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
4722 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
4723 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4724
4725 armnn::AdditionQueueDescriptor data;
4726 armnn::WorkloadInfo info;
4727 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
4728 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
4729 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
4730
4731 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
4732
4733 inputHandle1->Allocate();
4734 inputHandle2->Allocate();
4735 outputHandle->Allocate();
4736
4737 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
4738 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
4739
surmeh013537c2c2018-05-18 16:31:43 +01004740 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00004741 workload->Execute();
4742
4743 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4744
4745 return result;
4746}
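// Worked example for the first element above, assuming the usual asymmetric
// quantization rule real = scale * (q - offset): input0's 63 dequantizes to
// 7 * (63 - 3) = 420 and input1's 21 to 7 * (21 - 3) = 126; the real sum 546
// requantizes to 546 / 7 + 3 = 81, matching the first expected output. Sums above
// 7 * (255 - 3) = 1764 cannot be represented and clamp to 255, as annotated.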
4747
surmeh01bceff2f2018-03-29 16:29:27 +01004748namespace
telsoa014fcda012018-03-09 14:13:49 +00004749{
surmeh01bceff2f2018-03-29 16:29:27 +01004750LayerTestResult<uint8_t, 4> MultiplicationUint8TestHelper(armnn::IWorkloadFactory& workloadFactory,
4751 const unsigned int shape0[4],
4752 const std::vector<uint8_t> & values0,
4753 float scale0,
4754 int32_t offset0,
4755 const unsigned int shape1[4],
4756 const std::vector<uint8_t> & values1,
4757 float scale1,
4758 int32_t offset1,
4759 const unsigned int outShape[4],
4760 const std::vector<uint8_t> & outValues,
4761 float outScale,
4762 int32_t outOffset)
4763{
4764 armnn::TensorInfo inputTensorInfo0(4, shape0, armnn::DataType::QuantisedAsymm8);
4765 armnn::TensorInfo inputTensorInfo1(4, shape1, armnn::DataType::QuantisedAsymm8);
4766 armnn::TensorInfo outputTensorInfo(4, outShape, armnn::DataType::QuantisedAsymm8);
telsoa014fcda012018-03-09 14:13:49 +00004767
surmeh01bceff2f2018-03-29 16:29:27 +01004768 inputTensorInfo0.SetQuantizationScale(scale0);
4769 inputTensorInfo0.SetQuantizationOffset(offset0);
telsoa014fcda012018-03-09 14:13:49 +00004770
surmeh01bceff2f2018-03-29 16:29:27 +01004771 inputTensorInfo1.SetQuantizationScale(scale1);
4772 inputTensorInfo1.SetQuantizationOffset(offset1);
telsoa014fcda012018-03-09 14:13:49 +00004773
surmeh01bceff2f2018-03-29 16:29:27 +01004774 outputTensorInfo.SetQuantizationScale(outScale);
4775 outputTensorInfo.SetQuantizationOffset(outOffset);
telsoa014fcda012018-03-09 14:13:49 +00004776
surmeh01bceff2f2018-03-29 16:29:27 +01004777 auto input0 = MakeTensor<uint8_t, 4>(inputTensorInfo0, values0);
4778 auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00004779
telsoa014fcda012018-03-09 14:13:49 +00004780 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
surmeh01bceff2f2018-03-29 16:29:27 +01004781 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00004782
surmeh01bceff2f2018-03-29 16:29:27 +01004783 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
telsoa014fcda012018-03-09 14:13:49 +00004784 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
telsoa014fcda012018-03-09 14:13:49 +00004785 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4786
4787 armnn::MultiplicationQueueDescriptor data;
4788 armnn::WorkloadInfo info;
surmeh01bceff2f2018-03-29 16:29:27 +01004789 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
4790 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
telsoa014fcda012018-03-09 14:13:49 +00004791 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
4792
4793 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
4794
surmeh01bceff2f2018-03-29 16:29:27 +01004795 inputHandle0->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00004796 inputHandle1->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00004797 outputHandle->Allocate();
4798
surmeh01bceff2f2018-03-29 16:29:27 +01004799 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00004800 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00004801
surmeh013537c2c2018-05-18 16:31:43 +01004802 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00004803 workload->Execute();
4804
4805 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4806
4807 return result;
4808}
surmeh01bceff2f2018-03-29 16:29:27 +01004809} // anonymous namespace
4810
4811LayerTestResult<uint8_t, 4> MultiplicationUint8Test(armnn::IWorkloadFactory& workloadFactory)
4812{
4813 unsigned int batchSize = 1;
4814 unsigned int channels = 2;
4815 unsigned int height = 2;
4816 unsigned int width = 3;
4817 const unsigned int shape[] = { batchSize, channels, height, width };
4818
telsoa01c577f2c2018-08-31 09:22:23 +01004819 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01004820 std::vector<uint8_t> input0({
4821 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
4822 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
4823 });
4824
telsoa01c577f2c2018-08-31 09:22:23 +01004825 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01004826 std::vector<uint8_t> input1({
4827 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
4828 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
4829 });
4830
telsoa01c577f2c2018-08-31 09:22:23 +01004831 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01004832 std::vector<uint8_t> output(
4833 {
4834 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
4835 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
4836 });
4837
4838 return MultiplicationUint8TestHelper(workloadFactory,
4839 shape,
4840 input0,
4841 4.0f,
4842 1,
4843 shape,
4844 input1,
4845 3.0f,
4846 -2,
4847 shape,
4848 output,
telsoa01c577f2c2018-08-31 09:22:23 +01004849 1366.255f, // Scale/offset chosen to have output values out of range.
surmeh01bceff2f2018-03-29 16:29:27 +01004850 -5);
4851}
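// Worked example for the first element above, under the same assumed quantization
// rule: input0's 62 dequantizes to 4 * (62 - 1) = 244 and input1's 126 to
// 3 * (126 + 2) = 384; the real product 244 * 384 = 93696 requantizes to
// 93696 / 1366.255 - 5 ~= 63.6, which rounds to the expected 64. Products whose
// requantized value falls outside [0, 255] clamp, as annotated.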
4852
4853LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
4854{
4855 const unsigned int shape0[] = { 1, 2, 2, 3 };
4856 const unsigned int shape1[] = { 1, 1, 1, 1 };
4857
4858 std::vector<uint8_t> input0({
4859 1, 2, 3, 4, 5, 6,
4860 7, 8, 9, 10, 11, 12
4861 });
4862
4863 std::vector<uint8_t> input1({2});
4864
4865 std::vector<uint8_t> output({
4866 2, 4, 6, 8, 10, 12,
4867 14, 16, 18, 20, 22, 24
4868 });
4869
4870 return MultiplicationUint8TestHelper(workloadFactory,
4871 shape0,
4872 input0,
4873 1.0f,
4874 0,
4875 shape1,
4876 input1,
4877 1.0f,
4878 0,
4879 shape0,
4880 output,
4881 1.0f,
4882 0);
4883}
4884
4885LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
4886{
4887 const unsigned int shape0[] = { 1, 2, 2, 3 };
4888 const unsigned int shape1[] = { 1, 1, 1, 3 };
4889
4890 std::vector<uint8_t> input0({
4891 1, 2, 3, 4, 5, 6,
4892 7, 8, 9, 10, 11, 12
4893 });
4894
4895 std::vector<uint8_t> input1({1, 2, 3});
4896
4897 std::vector<uint8_t> output({
4898 1, 4, 9, 4, 10, 18,
4899 7, 16, 27, 10, 22, 36
4900 });
4901
4902 return MultiplicationUint8TestHelper(workloadFactory,
4903 shape0,
4904 input0,
4905 1.0f,
4906 0,
4907 shape1,
4908 input1,
4909 1.0f,
4910 0,
4911 shape0,
4912 output,
4913 1.0f,
4914 0);
4915}
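// Both broadcast tests above rely on the size-1 dimensions of input1 being tiled
// across input0: the single element { 2 } multiplies every element of input0, while
// the 1x1x1x3 vector { 1, 2, 3 } is reused at every (batch, height, width) position,
// e.g. the second row of the first channel becomes { 4*1, 5*2, 6*3 } = { 4, 10, 18 }.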
telsoa014fcda012018-03-09 14:13:49 +00004916
David Beckf195f032018-09-06 16:46:34 +01004917namespace
4918{
4919template <typename T>
4920LayerTestResult<T, 4> SubtractionTestHelper(armnn::IWorkloadFactory& workloadFactory,
4921 const unsigned int shape0[4],
4922 const std::vector<T>& values0,
4923 float scale0,
4924 int32_t offset0,
4925 const unsigned int shape1[4],
4926 const std::vector<T> & values1,
4927 float scale1,
4928 int32_t offset1,
4929 const unsigned int outShape[4],
4930 const std::vector<T> & outValues,
4931 float outScale,
4932 int32_t outOffset)
4933{
4934 auto dataType = (std::is_same<T, uint8_t>::value ?
4935 armnn::DataType::QuantisedAsymm8 :
4936 armnn::DataType::Float32);
4937
4938 armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
4939 armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
4940 armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
4941
4942 inputTensorInfo0.SetQuantizationScale(scale0);
4943 inputTensorInfo0.SetQuantizationOffset(offset0);
4944
4945 inputTensorInfo1.SetQuantizationScale(scale1);
4946 inputTensorInfo1.SetQuantizationOffset(offset1);
4947
4948 outputTensorInfo.SetQuantizationScale(outScale);
4949 outputTensorInfo.SetQuantizationOffset(outOffset);
4950
4951 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
4952 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
4953
4954 LayerTestResult<T, 4> result(outputTensorInfo);
4955 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
4956
4957 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
4958 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
4959 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4960
4961 armnn::SubtractionQueueDescriptor data;
4962 armnn::WorkloadInfo info;
4963 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
4964 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
4965 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
4966
4967 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);
4968
4969 inputHandle0->Allocate();
4970 inputHandle1->Allocate();
4971 outputHandle->Allocate();
4972
4973 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
4974 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
4975
4976 workloadFactory.Finalize();
4977 workload->Execute();
4978
4979 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4980
4981 return result;
4982}
4983} // anonymous namespace
4984
4985LayerTestResult<uint8_t, 4> SubtractionUint8Test(armnn::IWorkloadFactory& workloadFactory)
4986{
4987 const unsigned int shape0[] = { 1, 1, 2, 2 };
4988 const unsigned int shape1[] = { 1, 1, 2, 2 };
4989
4990 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
4991 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
4992 std::vector<uint8_t> output({ 3, 3, 5, 5 });
4993
4994 return SubtractionTestHelper(workloadFactory,
4995 shape0, input0, 0.5f, 2,
4996 shape1, input1, 1.0f, 0,
4997 shape0, output, 1.0f, 0);
4998}
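// Worked example: with scale 0.5 and offset 2, input0 dequantizes to
// { 0.5*(10-2), 0.5*(12-2), 0.5*(14-2), 0.5*(16-2) } = { 4, 5, 6, 7 }; subtracting
// input1's { 1, 2, 1, 2 } gives { 3, 3, 5, 5 }, which is the expected output exactly,
// since the output tensor uses scale 1 and offset 0.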
4999
5000LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
5001{
5002 const unsigned int shape0[] = { 1, 1, 2, 2 };
5003 const unsigned int shape1[] = { 1, 1, 1, 1 };
5004
5005 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
5006 std::vector<uint8_t> input1({ 2 });
5007 std::vector<uint8_t> output({ 5, 6, 7, 8 });
5008
5009 return SubtractionTestHelper(workloadFactory,
5010 shape0, input0, 0.5f, 2,
5011 shape1, input1, 1.0f, 0,
5012 shape0, output, 1.0f, 3);
5013}
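// Here the real results { 4 - 2, 5 - 2, 6 - 2, 7 - 2 } = { 2, 3, 4, 5 } requantize
// with the output offset of 3 to the expected { 5, 6, 7, 8 }.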
5014
5015LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
5016{
5017 const unsigned int shape0[] = { 1, 1, 2, 2 };
5018 const unsigned int shape1[] = { 1, 1, 2, 1 };
5019
5020 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
5021 std::vector<uint8_t> input1({ 2, 1 });
5022 std::vector<uint8_t> output({ 8, 11, 12, 15 });
5023
5024 return SubtractionTestHelper(workloadFactory,
5025 shape0, input0, 1.0f, 0,
5026 shape1, input1, 1.0f, 0,
5027 shape0, output, 1.0f, 0);
5028}
5029
5030LayerTestResult<float, 4> SubtractionTest(armnn::IWorkloadFactory& workloadFactory)
5031{
5032 const unsigned int shape0[] = { 1, 1, 2, 2 };
5033 const unsigned int shape1[] = { 1, 1, 2, 2 };
5034
5035 std::vector<float> input0({ 1, 2, 3, 4 });
5036 std::vector<float> input1({ 1, -1, 0, 2 });
5037 std::vector<float> output({ 0, 3, 3, 2 });
5038
5039 return SubtractionTestHelper(workloadFactory,
5040 shape0, input0, 1.0f, 0,
5041 shape1, input1, 1.0f, 0,
5042 shape0, output, 1.0f, 0);
5043}
5044
5045LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
5046{
5047 const unsigned int shape0[] = { 1, 1, 2, 2 };
5048 const unsigned int shape1[] = { 1, 1, 1, 1 };
5049
5050 std::vector<float> input0({ 1, 2, 3, 4 });
5051 std::vector<float> input1({ 10 });
5052 std::vector<float> output({ -9, -8, -7, -6 });
5053
5054 return SubtractionTestHelper(workloadFactory,
5055 shape0, input0, 1.0f, 0,
5056 shape1, input1, 1.0f, 0,
5057 shape0, output, 1.0f, 0);
5058}
5059
5060LayerTestResult<float, 4> SubtractionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
5061{
5062 const unsigned int shape0[] = { 1, 1, 2, 2 };
5063 const unsigned int shape1[] = { 1, 1, 1, 2 };
5064
5065 std::vector<float> input0({ 1, 2, 3, 4 });
5066 std::vector<float> input1({ 10, -5 });
5067 std::vector<float> output({ -9, 7, -7, 9 });
5068
5069 return SubtractionTestHelper(workloadFactory,
5070 shape0, input0, 1.0f, 0,
5071 shape1, input1, 1.0f, 0,
5072 shape0, output, 1.0f, 0);
5073}
5074
telsoa014fcda012018-03-09 14:13:49 +00005075LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(armnn::IWorkloadFactory& workloadFactory)
5076{
5077 constexpr unsigned int inputWidth = 4;
5078 constexpr unsigned int inputHeight = 4;
5079 constexpr unsigned int inputChannels = 1;
5080 constexpr unsigned int inputBatchSize = 1;
5081
5082 constexpr unsigned int outputWidth = inputWidth;
5083 constexpr unsigned int outputHeight = inputHeight;
5084 constexpr unsigned int outputChannels = inputChannels;
5085 constexpr unsigned int outputBatchSize = inputBatchSize;
5086
5087 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
5088 armnn::DataType::QuantisedAsymm8);
5089 inputTensorInfo.SetQuantizationScale(1.5f);
5090 inputTensorInfo.SetQuantizationOffset(-3);
5091
5092 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
5093 armnn::DataType::QuantisedAsymm8);
5094 outputTensorInfo.SetQuantizationScale(1.5f);
5095 outputTensorInfo.SetQuantizationOffset(-3);
5096
5097 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
5098 1, 2, 3, 4,
5099 2, 3, 4, 5,
5100 3, 4, 5, 6,
5101 4, 5, 6, 7
5102 }));
5103
5104 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
5105 result.outputExpected = input;
5106
5107 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5108 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5109
5110 armnn::ResizeBilinearQueueDescriptor descriptor;
5111 armnn::WorkloadInfo info;
5112 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5113 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5114
5115 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5116
5117 inputHandle->Allocate();
5118 outputHandle->Allocate();
5119 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5120
surmeh013537c2c2018-05-18 16:31:43 +01005121 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00005122 workload->Execute();
5123
5124 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5125 return result;
5126}
5127
5128LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(armnn::IWorkloadFactory& workloadFactory)
5129{
5130 constexpr unsigned int inputWidth = 2;
5131 constexpr unsigned int inputHeight = 2;
5132 constexpr unsigned int inputChannels = 1;
5133 constexpr unsigned int inputBatchSize = 1;
5134
5135 constexpr unsigned int outputWidth = inputWidth / 2;
5136 constexpr unsigned int outputHeight = inputHeight / 2;
5137 constexpr unsigned int outputChannels = inputChannels;
5138 constexpr unsigned int outputBatchSize = inputBatchSize;
5139
5140 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
5141 armnn::DataType::QuantisedAsymm8);
5142 inputTensorInfo.SetQuantizationScale(0.1567f);
5143 inputTensorInfo.SetQuantizationOffset(1);
5144
5145 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
5146 armnn::DataType::QuantisedAsymm8);
5147 outputTensorInfo.SetQuantizationScale(0.1567f);
5148 outputTensorInfo.SetQuantizationOffset(1);
5149
5150 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
5151 1, 255,
5152 200, 250
5153 }));
5154
5155 // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
5156 // then figures out the interpolants and weights. Note this is different to projecting the centre of the
telsoa01c577f2c2018-08-31 09:22:23 +01005157 // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
telsoa014fcda012018-03-09 14:13:49 +00005158 // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
5159 // the centre).
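    // Concretely, with a 2x2 input and a 1x1 output the projection scale is
    // inputDim / outputDim = 2 on both axes, so output texel (0,0) maps to input
    // coordinate (0 * 2, 0 * 2) = (0, 0) with zero interpolation weight in each
    // direction, and the result is input(0,0) = 1.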
5160 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
5161 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
5162 1
5163 }));
5164
5165 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5166 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5167
5168 armnn::ResizeBilinearQueueDescriptor descriptor;
5169 armnn::WorkloadInfo info;
5170 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5171 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5172
5173 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5174
5175 inputHandle->Allocate();
5176 outputHandle->Allocate();
5177 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5178
surmeh013537c2c2018-05-18 16:31:43 +01005179 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00005180 workload->Execute();
5181
5182 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5183 return result;
5184}
5185
5186LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
5187{
5188 constexpr unsigned int inputWidth = 4;
5189 constexpr unsigned int inputHeight = 4;
5190 constexpr unsigned int inputChannels = 1;
5191 constexpr unsigned int inputBatchSize = 1;
5192
5193 constexpr unsigned int outputWidth = inputWidth / 2;
5194 constexpr unsigned int outputHeight = inputHeight / 2;
5195 constexpr unsigned int outputChannels = inputChannels;
5196 constexpr unsigned int outputBatchSize = inputBatchSize;
5197
5198 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
5199 armnn::DataType::QuantisedAsymm8);
5200 inputTensorInfo.SetQuantizationScale(3.141592f);
5201 inputTensorInfo.SetQuantizationOffset(3);
5202
5203 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
5204 armnn::DataType::QuantisedAsymm8);
5205 outputTensorInfo.SetQuantizationScale(3.141592f);
5206 outputTensorInfo.SetQuantizationOffset(3);
5207
5208 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
5209 1, 2, 3, 4,
5210 2, 3, 4, 5,
5211 3, 4, 5, 6,
5212 4, 5, 6, 7
5213 }));
5214
5215 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
5216 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
5217 1, 3,
5218 3, 5
5219 }));
5220
5221 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5222 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5223
5224 armnn::ResizeBilinearQueueDescriptor descriptor;
5225 armnn::WorkloadInfo info;
5226 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5227 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5228
5229 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5230
5231 inputHandle->Allocate();
5232 outputHandle->Allocate();
5233 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5234
surmeh013537c2c2018-05-18 16:31:43 +01005235 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00005236 workload->Execute();
5237
5238 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5239 return result;
5240}
5241
5242LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
5243{
5244 constexpr unsigned int inputWidth = 3;
5245 constexpr unsigned int inputHeight = 2;
5246 constexpr unsigned int inputChannels = 1;
5247 constexpr unsigned int inputBatchSize = 1;
5248
5249 constexpr unsigned int outputWidth = 2;
5250 constexpr unsigned int outputHeight = 1;
5251 constexpr unsigned int outputChannels = inputChannels;
5252 constexpr unsigned int outputBatchSize = inputBatchSize;
5253
5254 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
5255 armnn::DataType::QuantisedAsymm8);
5256 inputTensorInfo.SetQuantizationScale(1.5f);
5257 inputTensorInfo.SetQuantizationOffset(-1);
5258
5259 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
5260 armnn::DataType::QuantisedAsymm8);
5261 outputTensorInfo.SetQuantizationScale(1.5f);
5262 outputTensorInfo.SetQuantizationOffset(-1);
5263
5264 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
5265 1, 2, 3, // 3.0, 4.5, 6.0
5266 5, 8, 13 // 9.0, 13.5, 21.0
5267 }));
5268
5269 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
5270 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
5271 1, 3 // 3.0, 5.25
5272 }));
5273
5274 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5275 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5276
5277 armnn::ResizeBilinearQueueDescriptor descriptor;
5278 armnn::WorkloadInfo info;
5279 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5280 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5281
5282 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5283
5284 inputHandle->Allocate();
5285 outputHandle->Allocate();
5286
5287 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5288
surmeh013537c2c2018-05-18 16:31:43 +01005289 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00005290 workload->Execute();
5291
5292 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5293 return result;
5294}
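// Worked example for the second output element above: the width scale is
// inputWidth / outputWidth = 3 / 2 = 1.5, so output x=1 projects to input x=1.5,
// halfway between the dequantized row values 4.5 and 6.0, giving
// 4.5 + 0.5 * (6.0 - 4.5) = 5.25. Requantizing, 5.25 / 1.5 - 1 = 2.5, which is
// assumed to round half up to the expected quantized value 3.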
5295
5296LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(armnn::IWorkloadFactory& workloadFactory)
5297{
5298 constexpr unsigned int inputWidth = 2;
5299 constexpr unsigned int inputHeight = 3;
5300 constexpr unsigned int inputChannels = 1;
5301 constexpr unsigned int inputBatchSize = 1;
5302
5303 constexpr unsigned int outputWidth = 5;
5304 constexpr unsigned int outputHeight = 3;
5305 constexpr unsigned int outputChannels = inputChannels;
5306 constexpr unsigned int outputBatchSize = inputBatchSize;
5307
5308 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
5309 armnn::DataType::QuantisedAsymm8);
5310 inputTensorInfo.SetQuantizationScale(0.010765f);
5311 inputTensorInfo.SetQuantizationOffset(7);
5312
5313 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
5314 armnn::DataType::QuantisedAsymm8);
5315 outputTensorInfo.SetQuantizationScale(0.010132f);
5316 outputTensorInfo.SetQuantizationOffset(-18);
5317
5318 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
5319 24, 228, // 0.183005, 2.379065,
5320 105, 128, // 1.05497, 1.302565
5321 230, 71 // 2.400595, 0.68896
5322 }));
5323
5324 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
5325 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
5326 0, 87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
5327 86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
5328 219, 151, 84, 50, 50 // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
5329 }));
5330
5331 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5332 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5333
5334 armnn::ResizeBilinearQueueDescriptor descriptor;
5335 armnn::WorkloadInfo info;
5336 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5337 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5338
5339 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5340
5341 inputHandle->Allocate();
5342 outputHandle->Allocate();
5343 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5344
surmeh013537c2c2018-05-18 16:31:43 +01005345 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00005346 workload->Execute();
5347
5348 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5349 return result;
5350}
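// Worked example for the second element of the first output row: the width scale is
// inputWidth / outputWidth = 2 / 5 = 0.4, so output x=1 projects to input x=0.4 and
// interpolates the dequantized pair (0.183005, 2.379065) as
// 0.183005 + 0.4 * (2.379065 - 0.183005) ~= 1.061429, matching the annotation above.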
5351
5352LayerTestResult<float, 4> BatchNormTest(armnn::IWorkloadFactory& workloadFactory)
5353{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01005354 // BatchSize: 1
5355 // Channels: 2
5356 // Height: 3
5357 // Width: 2
5358
5359 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
5360 std::vector<float> inputValues
5361 {
5362 // Batch 0, Channel 0, Height (3) x Width (2)
5363 1.f, 4.f,
5364 4.f, 2.f,
5365 1.f, 6.f,
5366
5367 // Batch 0, Channel 1, Height (3) x Width (2)
5368 1.f, 1.f,
5369 4.f, 1.f,
5370 -2.f, 4.f
5371 };
5372 std::vector<float> expectedOutputValues
5373 {
5374 // Batch 0, Channel 0, Height (3) x Width (2)
5375 1.f, 4.f,
5376 4.f, 2.f,
5377 1.f, 6.f,
5378
5379 // Batch 0, Channel 1, Height (3) x Width (2)
5380 3.f, 3.f,
5381 4.f, 3.f,
5382 2.f, 4.f
5383 };
5384
5385 return BatchNormTestImpl<float>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
5386 0.f, 0, armnn::DataLayout::NCHW);
5387}
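// For reference, BatchNormTestImpl is expected to apply the standard per-channel
// batch normalization transform,
//
//     y = gamma * (x - mean) / sqrt(variance + epsilon) + beta,
//
// with the mean/variance/beta/gamma constants chosen inside the implementation;
// channel 0 above passes through unchanged while channel 1 is shifted and rescaled.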
5388
5389LayerTestResult<float, 4> BatchNormNhwcTest(armnn::IWorkloadFactory& workloadFactory)
5390{
5391 // BatchSize: 1
5392 // Height: 3
5393 // Width: 2
5394 // Channels: 2
5395
5396 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
5397 std::vector<float> inputValues
5398 {
5399 // Batch 0, Height 0, Width (2) x Channel (2)
5400 1.f, 1.f,
5401 4.f, 1.f,
5402
5403 // Batch 0, Height 1, Width (2) x Channel (2)
5404 4.f, 4.f,
5405 2.f, 1.f,
5406
5407 // Batch 0, Height 2, Width (2) x Channel (2)
5408 1.f, -2.f,
5409 6.f, 4.f
5410 };
5411 std::vector<float> expectedOutputValues
5412 {
5413 // Batch 0, Height 0, Width (2) x Channel (2)
5414 1.f, 3.f,
5415 4.f, 3.f,
5416
5417 // Batch 0, Height 1, Width (2) x Channel (2)
5418 4.f, 4.f,
5419 2.f, 3.f,
5420
5421 // Batch 0, Height 2, Width (2) x Channel (2)
5422 1.f, 2.f,
5423 6.f, 4.f
5424 };
5425
5426 return BatchNormTestImpl<float>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
5427 0.f, 0, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00005428}
5429
5430LayerTestResult<uint8_t, 4> BatchNormUint8Test(armnn::IWorkloadFactory& workloadFactory)
5431{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01005432 // BatchSize: 1
5433 // Channels: 2
5434 // Height: 3
5435 // Width: 2
5436
5437 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
5438 std::vector<float> inputValues
5439 {
5440 // Batch 0, Channel 0, Height (3) x Width (2)
5441 1.f, 4.f,
5442 4.f, 2.f,
5443 1.f, 6.f,
5444
5445 // Batch 0, Channel 1, Height (3) x Width (2)
5446 1.f, 1.f,
5447 4.f, 1.f,
5448 -2.f, 4.f
5449 };
5450 std::vector<float> expectedOutputValues
5451 {
5452 // Batch 0, Channel 0, Height (3) x Width (2)
5453 1.f, 4.f,
5454 4.f, 2.f,
5455 1.f, 6.f,
5456
5457 // Batch 0, Channel 1, Height (3) x Width (2)
5458 3.f, 3.f,
5459 4.f, 3.f,
5460 2.f, 4.f
5461 };
5462
5463 return BatchNormTestImpl<uint8_t>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
5464 1.f/20.f, 50, armnn::DataLayout::NCHW);
5465}
5466
5467LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(armnn::IWorkloadFactory& workloadFactory)
5468{
5469 // BatchSize: 1
5470 // Height: 3
5471 // Width: 2
5472 // Channels: 2
5473
5474 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
5475 std::vector<float> inputValues
5476 {
5477 // Batch 0, Height 0, Width (2) x Channel (2)
5478 1.f, 1.f,
5479 4.f, 1.f,
5480
5481 // Batch 0, Height 1, Width (2) x Channel (2)
5482 4.f, 4.f,
5483 2.f, 1.f,
5484
5485 // Batch 0, Height 2, Width (2) x Channel (2)
5486 1.f, -2.f,
5487 6.f, 4.f
5488 };
5489 std::vector<float> expectedOutputValues
5490 {
5491 // Batch 0, Height 0, Width (2) x Channel (2)
5492 1.f, 3.f,
5493 4.f, 3.f,
5494
5495 // Batch 0, Height 1, Width (2) x Channel (2)
5496 4.f, 4.f,
5497 2.f, 3.f,
5498
5499 // Batch 0, Height 2, Width (2) x Channel (2)
5500 1.f, 2.f,
5501 6.f, 4.f
5502 };
5503
5504 return BatchNormTestImpl<uint8_t>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
5505 1.f/20.f, 50, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00005506}
5507
5508LayerTestResult<uint8_t, 4> ConstantUint8Test(armnn::IWorkloadFactory& workloadFactory)
5509{
5510 return ConstantTestImpl<uint8_t>(workloadFactory, 2e-6f, 1);
5511}
5512
5513LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(armnn::IWorkloadFactory& workloadFactory)
5514{
5515 return Concatenation1dTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5516}
5517
5518LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5519{
5520 return Concatenation2dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5521}
5522
5523LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5524{
5525 return Concatenation2dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5526}
5527
5528LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
5529{
5530 return Concatenation2dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5531}
5532
5533LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
5534{
5535 return Concatenation2dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5536}
5537
5538LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5539{
5540 return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5541}
5542
5543LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5544{
5545 return Concatenation3dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5546}
5547
5548LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5549{
5550 return Concatenation3dDim2TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5551}
5552
5553LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
5554{
5555 return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5556}
5557
5558LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
5559{
5560 return Concatenation3dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5561}
5562
5563LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
5564{
5565 return Concatenation3dDim2DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
5566}
5567
5568LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
5569 bool forceNoPadding)
5570{
5571 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
5572}
5573
5574LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory& workloadFactory,
5575 bool forceNoPadding)
5576{
5577 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<uint8_t>(workloadFactory, forceNoPadding, 3.0f, -5);
5578}
5579
5580LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory& workloadFactory,
5581 bool forceNoPadding)
5582{
5583 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<float>(workloadFactory, forceNoPadding);
5584}
5585
5586LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory& workloadFactory,
5587 bool forceNoPadding)
5588{
5589 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<uint8_t>(workloadFactory, forceNoPadding, 0.1f, 128);
5590}
5591
5592LayerTestResult<float, 4> SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
5593{
James Conroy69482272018-10-19 10:41:35 +01005594 return SimpleAveragePooling2dTest<float>(workloadFactory);
telsoa014fcda012018-03-09 14:13:49 +00005595}
5596
Francis Murtagh043d0d02018-10-05 14:08:48 +01005597LayerTestResult<float, 4> SimpleAveragePooling2dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
5598{
James Conroy69482272018-10-19 10:41:35 +01005599 return SimpleAveragePooling2dNhwcTest<float>(workloadFactory);
Francis Murtagh043d0d02018-10-05 14:08:48 +01005600}
5601
telsoa014fcda012018-03-09 14:13:49 +00005602LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
5603{
James Conroy69482272018-10-19 10:41:35 +01005604    return SimpleAveragePooling2dTest<uint8_t>(workloadFactory, 0.5f, -1);
5605}
5606
5607LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8NhwcTest(armnn::IWorkloadFactory& workloadFactory)
5608{
5609    return SimpleAveragePooling2dNhwcTest<uint8_t>(workloadFactory, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00005610}
5611
surmeh01bceff2f2018-03-29 16:29:27 +01005612LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
5613 bool forceNoPadding)
5614{
5615 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
5616}
5617
telsoa014fcda012018-03-09 14:13:49 +00005618LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
5619{
5620 return LargeTensorsAveragePooling2dTestCommon<float>(workloadFactory);
5621}
5622
5623LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
5624{
5625    return LargeTensorsAveragePooling2dTestCommon<uint8_t>(workloadFactory, 0.5f, -1);
5626}
5627
5628LayerTestResult<float, 4> SimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory)
5629{
5630 return SimpleL2Pooling2dTestCommon<float>(workloadFactory);
5631}
5632
5633LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
5634{
5635 return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory);
5636}
5637
5638LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory& workloadFactory)
5639{
5640 return L2Pooling2dSize3Stride1TestCommon<float>(workloadFactory);
5641}
5642
5643LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5644{
5645 return L2Pooling2dSize3Stride1TestCommon<uint8_t>(workloadFactory);
5646}
5647
5648LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory& workloadFactory)
5649{
5650 return L2Pooling2dSize3Stride3TestCommon<float>(workloadFactory);
5651}
5652
5653LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5654{
5655 return L2Pooling2dSize3Stride3TestCommon<uint8_t>(workloadFactory);
5656}
5657
5658LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory& workloadFactory)
5659{
5660 return L2Pooling2dSize3Stride4TestCommon<float>(workloadFactory);
5661}
5662
5663LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5664{
5665 return L2Pooling2dSize3Stride4TestCommon<uint8_t>(workloadFactory);
5666}
5667
5668LayerTestResult<float, 4> L2Pooling2dSize7Test(armnn::IWorkloadFactory& workloadFactory)
5669{
5670 return L2Pooling2dSize7TestCommon<float>(workloadFactory);
5671}
5672
5673LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5674{
5675 return L2Pooling2dSize7TestCommon<uint8_t>(workloadFactory);
5676}
5677
5678LayerTestResult<float, 4> L2Pooling2dSize9Test(armnn::IWorkloadFactory& workloadFactory)
5679{
5680 return L2Pooling2dSize9TestCommon<float>(workloadFactory);
5681}
5682
5683LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory& workloadFactory)
5684{
5685 return L2Pooling2dSize9TestCommon<uint8_t>(workloadFactory);
5686}
5687
5688LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
5689{
5690 return AsymmetricNonSquarePooling2dTestCommon<float>(workloadFactory);
5691}
5692
5693LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
5694{
5695 return AsymmetricNonSquarePooling2dTestCommon<uint8_t>(workloadFactory);
5696}
5697
5698LayerTestResult<float, 4> ComparePooling2dTest(armnn::IWorkloadFactory& workloadFactory,
5699 armnn::IWorkloadFactory& refWorkloadFactory,
5700 armnn::PoolingAlgorithm poolingType)
5701{
5702 return ComparePooling2dTestCommon<float>(workloadFactory, refWorkloadFactory, poolingType);
5703}
5704
5705LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
5706 armnn::IWorkloadFactory& refWorkloadFactory,
5707 armnn::PoolingAlgorithm poolingType)
5708{
5709 return ComparePooling2dTestCommon<uint8_t>(workloadFactory, refWorkloadFactory, poolingType, 0.1f, 128);
5710}

LayerTestResult<float, 2> FullyConnectedLargeTest(armnn::IWorkloadFactory& workloadFactory,
                                                  bool transposeWeights)
{
    return FullyConnectedLargeTestCommon<float>(workloadFactory, transposeWeights);
}

LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, 1.0f, -5);
}

LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<uint8_t>(workloadFactory, 1.0f, -5);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> SimplePermuteFloat32Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimplePermuteFloat32TestCommon(workloadFactory);
}

LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimplePermuteUint8TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet1TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet2TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet3TestCommon(workloadFactory);
}

namespace
{

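// Runs a Mean workload on the given factory and compares the result against the expected data.
// InputDim/OutputDim are the tensor ranks; 'axis' lists the dimensions to reduce (an empty list
// reduces over every dimension, as the simple tests below rely on); 'keepDims' retains reduced
// dimensions with size 1; 'scale' and 'offset' are the quantization parameters applied to both
// the input and the output tensor infos.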
template <typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> MeanTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                             const unsigned int* inputShape,
                                             const std::vector<T>& inputData,
                                             const std::vector<unsigned int>& axis,
                                             bool keepDims,
                                             const unsigned int* outputShape,
                                             const std::vector<T>& outputData,
                                             float scale = 1.0f,
                                             int32_t offset = 0)
{
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MeanQueueDescriptor data;
    data.m_Parameters.m_Axis = axis;
    data.m_Parameters.m_KeepDims = keepDims;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());

    return result;
}

} // anonymous namespace
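
// The tests below feed small constant tensors through the helper. Each expected output is the
// arithmetic mean along the reduced axes; e.g. reducing { 1, 1, 2, 2, 3, 3 } over every dimension
// gives (1 + 1 + 2 + 2 + 3 + 3) / 6 = 2.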

LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 3, 2 };
    const unsigned int outputShape[] = { 1 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2 });

    return MeanTestHelper<uint8_t, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
}

LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 2 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2, 2 });

    return MeanTestHelper<uint8_t, 4, 3>(workloadFactory, inputShape, input, { 2 }, false, outputShape, output);
}

LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2, 2 });

    return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, { 2 }, true, outputShape, output);
}

LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 1, 3, 1, 1 };

    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
    std::vector<uint8_t> output({ 1, 3, 5 });

    return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, { 0, 3 }, true, outputShape, output);
}
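
// The VTS-style test below exercises non-trivial quantization parameters (scale 0.8, zero point 5).
// Because the input and output share the same parameters, the expected quantized outputs are simply
// the integer means of the quantized inputs: reducing the { 4, 3, 2 } input over axes { 0, 1 }
// averages 12 values per output element, giving (1 + 3 + ... + 23) / 12 = 12 and
// (2 + 4 + ... + 24) / 12 = 13.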

LayerTestResult<uint8_t, 1> MeanVtsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 2 };

    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
                                 24 });
    std::vector<uint8_t> output({ 12, 13 });

    return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, inputShape, input, { 0, 1 }, false, outputShape,
                                         output, 0.8f, 5);
}

LayerTestResult<float, 1> MeanFloatSimpleTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 3, 2 };
    const unsigned int outputShape[] = { 1 };

    std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
    std::vector<float> output({ 2.0f });

    return MeanTestHelper<float, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
}

LayerTestResult<float, 3> MeanFloatSimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 3, 1, 2 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
    std::vector<float> output({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });

    return MeanTestHelper<float, 4, 3>(workloadFactory, inputShape, input, { 0 }, false, outputShape, output);
}

LayerTestResult<float, 4> MeanFloatKeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };

    std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
    std::vector<float> output({ 2.0f, 2.0f });

    return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, { 2 }, true, outputShape, output);
}

LayerTestResult<float, 4> MeanFloatMultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 1, 3, 1, 1 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
    std::vector<float> output({ 1.5f, 3.5f, 5.5f });

    return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, { 0, 3 }, true, outputShape, output);
}

LayerTestResult<float, 1> MeanVtsFloat1Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 2 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
                               15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
    std::vector<float> output({ 12.0f, 13.0f });

    return MeanTestHelper<float, 3, 1>(workloadFactory, inputShape, input, { 0, 1 }, false, outputShape, output);
}

LayerTestResult<float, 3> MeanVtsFloat2Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 1, 3, 1 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
                               15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
    std::vector<float> output({ 10.5f, 12.5f, 14.5f });

    return MeanTestHelper<float, 3, 3>(workloadFactory, inputShape, input, { 0, 2 }, true, outputShape, output);
}

LayerTestResult<float, 3> MeanVtsFloat3Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 2, 2, 1 };
    const unsigned int outputShape[] = { 1, 2, 1 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f });
    std::vector<float> output({ 1.5f, 3.5f });

    return MeanTestHelper<float, 4, 3>(workloadFactory, inputShape, input, { 2 }, false, outputShape, output);
}

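// The test below chains two workloads: a 1x1 max pool with stride 2, followed by an addition that
// consumes the pooling output handle directly as one of its inputs. It effectively checks that a
// tensor handle produced by one workload can be fed straight into another on the same factory.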
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(armnn::IWorkloadFactory& workloadFactory)
{
    // Create the initial input tensor:
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<float>());
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<float>());

    boost::multi_array<float, 4> poolingInput = MakeTensor<float, 4>(poolingInputTensorInfo,
                                                                     {1, 2, 3,
                                                                      4, 5, 6,
                                                                      7, 8, 9
                                                                     });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply a max pool with poolSize = 1x1 and stride = 2x2. Result:
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the max pool workload.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    // Holds the intermediate max pool result so it can be read back after execution.
    auto shape(GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);

    // Create an addition with a second tensor of the same size. This second tensor is the result
    // of applying a Conv2d with a 2x2 kernel of ones and stride 1x1 to the initial tensor:
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<float>());
    armnn::TensorInfo addOutputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<float>());

    boost::multi_array<float, 4> addInput = MakeTensor<float, 4>(addInputTensorInfo,
                                                                 {12, 16,
                                                                  24, 28
                                                                 });

    // Expected output tensor after the max pool and the addition.
    LayerTestResult<float, 4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
        {
            13, 19,
            31, 37
        }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the max pool and the second tensor.
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    workload->Execute();

    // Read back the intermediate max pool result. The addition consumes poolingOutputHandle
    // directly, so the data does not need to be copied back into the handle.
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    workloadFactory.Finalize();

    return addRet;
}