//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "LayerTests.hpp"

#include "test/TensorHelpers.hpp"
#include "TensorCopyUtils.hpp"
#include "Permute.hpp"

#include <boost/test/unit_test.hpp>
#include <boost/assert.hpp>

#include <armnn/LayerSupport.hpp>

#include <backends/CpuTensorHandle.hpp>
#include <backends/WorkloadFactory.hpp>

#ifdef ARMCOMPUTECL_ENABLED
#include <backends/ClTensorHandle.hpp>
#include <backends/aclCommon/ArmComputeTensorUtils.hpp>
#endif

#include <algorithm>
#include <boost/cast.hpp>

#include "WorkloadTestUtils.hpp"
#include "Conv2dTestImpl.hpp"
#include "BatchNormTestImpl.hpp"
#include "ActivationTestImpl.hpp"
#include "Pooling2dTestImpl.hpp"
#include "ReshapeTestImpl.hpp"
#include "FullyConnectedTestImpl.hpp"
#include "SplitterTestImpl.hpp"
#include "SoftmaxTestImpl.hpp"
#include "NormTestImpl.hpp"
#include "PermuteTestImpl.hpp"
#include "LstmTestImpl.hpp"
#include "ConvertFp16ToFp32TestImpl.hpp"
#include "ConvertFp32ToFp16TestImpl.hpp"

// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});

// 2-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias2({0, 2});

// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
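// The element type of the bias is resolved at each call site via FullyConnectedBiasTypeForInputType,
// since quantized configurations typically hold the bias in a wider type than the inputs (for
// example, the uint8_t depthwise tests below instantiate their implementations with int32_t).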
template<typename T>
boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
{
    if (biasEnabled)
    {
        armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, armnn::GetDataType<T>());
        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
        return bias;
    }
    else
    {
        return boost::multi_array<T, 1>();
    }
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       float qScale,
                                                       int32_t qOffset,
                                                       bool biasEnabled)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x4 image.
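    // The spatial size follows from a stride-1 convolution with no padding:
    // width = 16 - 3 + 1 = 14, height = 8 - 5 + 1 = 4; the 2 kernels give the 2 output channels.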
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset);
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       float qScale,
                                                       int32_t qOffset,
                                                       bool biasEnabled)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
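    // As above, stride 1 with no padding: width = 16 - 3 + 1 = 14, height = 8 - 3 + 1 = 6.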
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset);
}

LayerTestResult<float, 4> SimpleConvolution2d3x5Test(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled)
{
    return SimpleConvolution2d3x5TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled)
{
    return SimpleConvolution2d3x5TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3Test(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled)
{
    return SimpleConvolution2d3x3TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled)
{
    return SimpleConvolution2d3x3TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

template<typename T>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11, 21, 31,
            12, 22, 32,
            13, 23, 33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11, -21,
            -12, -22,
        })));

    // Expected output is 1 batch of a 1-channel 6x8 image.
    // Manually calculated like this:
    //[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
    //[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
    //[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
    //[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
    //[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
    //[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
    //[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
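    // With stride 1 the output size is inSize + padBefore + padAfter - kernelSize + 1 per dimension:
    // height = 3 + 2 + 4 - 2 + 1 = 8, width = 3 + 1 + 3 - 2 + 1 = 6.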
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            0, 0, 0, 0, 0, 0,
            -242, -594, -934, -372, 0, 0,
            -495, -1190, -1850, -725, 0, 0,
            -538, -1256, -1916, -748, 0, 0,
            -273, -626, -946, -363, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          1,  // Padding left.
                                          2,  // Padding top.
                                          3,  // Padding right.
                                          4); // Padding bottom.
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                     float qScale,
                                                                     int32_t qOffset)
{
    // Use a single-batch 1-channel 5x5 image as input.
    armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11, 21, 31, 41, 51,
            12, 22, 32, 42, 52,
            13, 23, 33, 43, 53,
            14, 24, 34, 44, 54,
            15, 25, 35, 45, 55,
        })));

    // Use 1 batch of a 1-channel 4x4 kernel.
    armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11, -21, -31, -41,
            -12, -22, -32, -42,
            -13, -23, -33, -43,
            -14, -24, -34, -44,
        })));

    // Expected output is 1 batch of a 1-channel 5x5 image.
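    // With stride 1 each dimension gives 5 + 1 + 2 - 4 + 1 = 5, so the output matches the input size.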
    armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -7140, -10580, -13940, -9300, -5230,
            -9590, -14120, -18520, -12290, -6860,
            -9980, -14560, -18960, -12560, -7000,
            -7518, -10904, -14144, -9318, -5152,
            -5032, -7256, -9376, -6142, -3368,
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          1,  // Padding left.
                                          1,  // Padding top.
                                          2,  // Padding right.
                                          2); // Padding bottom.
}

template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                 float qScale,
                                                                 int32_t qOffset,
                                                                 bool biasEnabled)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            0, 1, 2, 3, 4,
            5, 6, 7, 8, 9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10, 9,
            8, 7, 6, 5,
            4, 3, 2, 1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the python tensorflow library with strideX=1, strideY=1.
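    // With stride 1 and the asymmetric padding passed below (left 1, top 1, right 2, bottom 2), each
    // spatial dimension gives 5 + 1 + 2 - 4 + 1 = 5, and a depth multiplier of 1 keeps the 2 channels.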
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dAsymmetricTestImpl<T>(workloadFactory,
                                                       input,
                                                       kernel,
                                                       GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                                       expectedOutput,
                                                       qScale,
                                                       qOffset,
                                                       1,  // Padding left.
                                                       1,  // Padding top.
                                                       2,  // Padding right.
                                                       2,  // Padding bottom.
                                                       1,  // strideX
                                                       1); // strideY
}

LayerTestResult<float, 4>
Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleConvolution2dAsymmetricPaddingTestCommon<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<float, 4> DepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled)
{
    return DepthwiseConvolution2dTestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(armnn::IWorkloadFactory& workloadFactory,
                                                              bool biasEnabled)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(armnn::IWorkloadFactory& workloadFactory,
                                                               bool biasEnabled)
{
    return DepthwiseConvolution2dAsymmetricTestCommon<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled)
{
    return DepthwiseConvolution2dTestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                                     bool biasEnabled)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

LayerTestResult<float, 4> Convolution1dTest(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
{
    return Convolution1dTestImpl<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> Convolution1dUint8Test(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
{
    return Convolution1dTestImpl<uint8_t>(workloadFactory, 0.1f, 128, biasEnabled);
}

LayerTestResult<float,4> CompareConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                  armnn::IWorkloadFactory& refWorkloadFactory)
{
    return CompareConvolution2dTestImpl<float>(workloadFactory, refWorkloadFactory);
}

template<typename T>
LayerTestResult<T,4> CompareDepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                       armnn::IWorkloadFactory& refWorkloadFactory)
{
    return CompareDepthwiseConvolution2dTestImpl<T>(workloadFactory, refWorkloadFactory);
}

template LayerTestResult<float, 4> CompareDepthwiseConvolution2dTest<float>(
    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&);
template LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dTest<uint8_t>(
    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&);

LayerTestResult<float,4> SimpleNormalizationAcrossTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float,4> SimpleNormalizationWithinTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float,2> SimpleSoftmaxTest(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    return SimpleSoftmaxTestImpl<float>(workloadFactory, beta);
}

LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    return SimpleSoftmaxTestImpl<uint8_t>(workloadFactory, beta);
}

LayerTestResult<float,4> CompareNormalizationTest(armnn::IWorkloadFactory& workloadFactory,
                                                  armnn::IWorkloadFactory& refWorkloadFactory,
                                                  armnn::NormalizationAlgorithmChannel normChannel,
                                                  armnn::NormalizationAlgorithmMethod normMethod)
{
    return CompareNormalizationTestImpl(workloadFactory, refWorkloadFactory, normChannel, normMethod);
}

LayerTestResult<float,2> CompareSoftmaxTest(armnn::IWorkloadFactory& workloadFactory,
                                            armnn::IWorkloadFactory& refWorkloadFactory,
                                            float beta)
{
    return CompareSoftmaxTestImpl<float>(workloadFactory, refWorkloadFactory, beta);
}

LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                   armnn::IWorkloadFactory& refWorkloadFactory,
                                                   float beta)
{
    return CompareSoftmaxTestImpl<uint8_t>(workloadFactory, refWorkloadFactory, beta);
}

std::vector<LayerTestResult<float,3>> SplitterTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SplitterTestCommon<float>(workloadFactory);
}

std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SplitterTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<float, 3> CopyViaSplitterTest(armnn::IWorkloadFactory& workloadFactory)
{
    return CopyViaSplitterTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return CopyViaSplitterTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({ 2, 2 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        { 2., 3., 3., 4. }));

    armnn::TensorInfo outputDesc({ 2, 4 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
         -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
         0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
         -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
         -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
         0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
         -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
         0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
         0.02168f}));
    return LstmLayerFloat32NoCifgWithPeepholeWithProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({2, 2}, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        {2., 3., 3., 4.}));

    armnn::TensorInfo outputDesc({2, 4}, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
         -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float,3> MergerTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float,3> ret(outputTensorInfo);

    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,

            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,
        })
    );

    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
        {
            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; // Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
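    // The two views tile the 3-channel output: input1 (2 channels) starts at channel 0 and
    // input2 (1 channel) starts at channel 2, so the merge concatenates along the channel axis.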

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);
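    // Where the backend supports sub-tensors, each input handle is created as a view into the output
    // tensor at its window origin, so copying the input data below writes straight into the merged
    // output; otherwise standalone input tensors are created and the merger workload performs the copies.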

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<float,4> AdditionTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 2;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
        {
            0.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            1.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 1.0f, 2.0f,

            0.0f, 0.0f, 1.0f,
            0.2f, 1.0f, 2.0f,
        }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
        {
            1.0f, 2.0f, 1.0f,
            0.0f, 1.0f, 2.0f,

            1.0f, 2.0f, -2.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 0.0f, -3.0f,

            0.0f, 0.0f, 1.0f,
            0.7f, 1.0f, 5.0f,
        }));

    LayerTestResult<float,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 4.0f, 2.0f,
            0.2f, 2.0f, 4.0f,

            2.0f, 4.0f, -1.0f,
            0.4f, 2.0f, 4.0f,

            0.0f, 4.0f, 2.0f,
            8.4f, 1.0f, -1.0f,

            0.0f, 0.0f, 2.0f,
            0.9f, 2.0f, 7.0f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template <typename T>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                float qScale,
                                                int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
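    // Each size-1 dimension is broadcast against the other operand, so
    // output[n][c][h][w] = input1[0][c][h][0] + input2[0][0][h][w].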

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template <typename T>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                        float qScale,
                                                        int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f, 1.0f, 2.0f,
            3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f,
            9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
            6.5f, 7.5f, 8.5f,
            9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<float, 4> AdditionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcastTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcastTestImpl<uint8_t>(workloadFactory, 2.f, 0);
}

LayerTestResult<float, 4> AdditionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcast1ElementTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcast1ElementTestImpl<uint8_t>(workloadFactory, 0.1333333f, 128);
}

LayerTestResult<float,4> CompareAdditionTest(armnn::IWorkloadFactory& workloadFactory,
                                             armnn::IWorkloadFactory& refWorkloadFactory)
{
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();
    refWorkloadFactory.Finalize();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}

namespace {
template <typename T>
LayerTestResult<T, 4> DivisionTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                         const unsigned int shape0[4],
                                         const std::vector<T>& values0,
                                         float scale0,
                                         int32_t offset0,
                                         const unsigned int shape1[4],
                                         const std::vector<T>& values1,
                                         float scale1,
                                         int32_t offset1,
                                         const unsigned int outShape[4],
                                         const std::vector<T>& outValues,
                                         float outScale,
                                         int32_t outOffset)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DivisionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<float,4> DivisionByZeroTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<float> input0({
        1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
        -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });

    std::vector<float> input1({
        0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
        0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });

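    // Expected values follow IEEE-754 float division: a non-zero value divided by a (signed) zero
    // gives an infinity whose sign combines the operand signs, 0/±0 gives NaN, and 5/5 is exactly 1.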
    std::vector<float> output({
        INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
        -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });

    return DivisionTestHelper<float>(workloadFactory,
                                     shape, input0, 1.0f, 0,
                                     shape, input1, 1.0f, 0,
                                     shape, output, 1.0f, 0);
}

LayerTestResult<float,4> DivisionTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<float> input0({
        2, 2, 2, 2, 3, 3, 3, 3,
        4, 4, 4, 4, 5, 5, 5, 5 });

    std::vector<float> input1({
        1, 1, 1, 1, 2, 2, 2, 2,
        4, 4, 4, 4, 4, 4, 4, 4 });

    std::vector<float> output({
        2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
        1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });

    return DivisionTestHelper<float>(workloadFactory,
                                     shape, input0, 1.0f, 0,
                                     shape, input1, 1.0f, 0,
                                     shape, output, 1.0f, 0);
}

LayerTestResult<float, 4> DivisionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 2, 2, 2 };
    std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16 });

    unsigned int shape1[] = { 1, 1, 1, 1 };
    std::vector<float> input1({ 2 });

    std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8 });

    return DivisionTestHelper<float>(workloadFactory,
                                     shape0, input0, 1.0f, 0,
                                     shape1, input1, 1.0f, 0,
                                     shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 3, 3, 2 };
    std::vector<float> input0({
        1, 4, 3, 8, 5, 12,
        7, 16, 9, 20, 11, 24,
        13, 28, 15, 32, 17, 36 });

    unsigned int shape1[] = { 1, 1, 1, 2 };
    std::vector<float> input1({ 1, 2 });

    std::vector<float> output({
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12,
        13, 14, 15, 16, 17, 18 });

    return DivisionTestHelper<float>(workloadFactory,
                                     shape0, input0, 1.0f, 0,
                                     shape1, input1, 1.0f, 0,
                                     shape0, output, 1.0f, 0);
}

LayerTestResult<uint8_t,4> DivisionUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
                                 4, 4, 4, 4, 5, 5, 5, 5 });

    std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
                                 4, 4, 4, 4, 4, 4, 4, 4 });

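    // Both inputs use scale 1.0 and offset 0, so the real quotients are 2, 1.5, 1 and 1.25; quantizing
    // with the output scale of 0.25 gives 2/0.25 = 8, 1.5/0.25 = 6, 1/0.25 = 4 and 1.25/0.25 = 5.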
    std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
                                 4, 4, 4, 4, 5, 5, 5, 5});

    return DivisionTestHelper<uint8_t>(workloadFactory,
                                       shape, input0, 1.0f, 0,
                                       shape, input1, 1.0f, 0,
                                       shape, output, 0.25f, 0);
}

LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 2, 2, 2 };
    std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16 });

    unsigned int shape1[] = { 1, 1, 1, 1 };
    std::vector<uint8_t> input1({ 2 });

    std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8 });

    return DivisionTestHelper<uint8_t>(workloadFactory,
                                       shape0, input0, 1.0f, 0,
                                       shape1, input1, 1.0f, 0,
                                       shape0, output, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 3, 3, 2 };
    std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
                                 7, 16, 9, 20, 11, 24,
                                 13, 28, 15, 32, 17, 36});

    unsigned int shape1[] = { 1, 1, 1, 2 };
    std::vector<uint8_t> input1({ 1, 2 });

    std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
                                 7, 8, 9, 10, 11, 12,
                                 13, 14, 15, 16, 17, 18});

    return DivisionTestHelper<uint8_t>(workloadFactory,
                                       shape0, input0, 1.0f, 0,
                                       shape1, input1, 1.0f, 0,
                                       shape0, output, 1.0f, 0);
}

namespace {
LayerTestResult<float,4> MultiplicationTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                                  const unsigned int shape0[4],
                                                  const std::vector<float>& values0,
                                                  const unsigned int shape1[4],
                                                  const std::vector<float>& values1,
                                                  const unsigned int outShape[4],
                                                  const std::vector<float>& outValues)
{
    const size_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};

    auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
    return ret;
}
} // anonymous namespace

1344LayerTestResult<float,4> MultiplicationTest(armnn::IWorkloadFactory& workloadFactory)
1345{
1346 const unsigned int width = 2;
1347 const unsigned int height = 2;
1348 const unsigned int channelCount = 2;
1349 const unsigned int batchSize = 2;
1350
1351 unsigned int shape[] = { batchSize, channelCount, height, width };
1352
1353 std::vector<float> input0({
1354 1, 1, 1, 1, 2, 2, 2, 2,
1355 3, 3, 3, 3, 4, 4, 4, 4 });
1356
1357 std::vector<float> input1({
1358 2, 2, 2, 2, 3, 3, 3, 3,
1359 4, 4, 4, 4, 5, 5, 5, 5 });
1360
1361 std::vector<float> output({
1362 2, 2, 2, 2, 6, 6, 6, 6,
1363 12, 12, 12, 12, 20, 20, 20, 20 });
1364
1365 return MultiplicationTestHelper(workloadFactory,
1366 shape,
1367 input0,
1368 shape,
1369 input1,
1370 shape,
1371 output);
1372}

LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 2, 2, 2 };
    std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});

    unsigned int shape1[] = { 1, 1, 1, 1 };
    std::vector<float> input1({ 2 });

    std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});

    return MultiplicationTestHelper(workloadFactory,
                                    shape0,
                                    input0,
                                    shape1,
                                    input1,
                                    shape0,
                                    output);
}

LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 3, 3, 2 };
    std::vector<float> input0({
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12,
        13, 14, 15, 16, 17, 18});

    unsigned int shape1[] = { 1, 1, 1, 2 };
    std::vector<float> input1({ 1, 2 });

    std::vector<float> output({
        1, 4, 3, 8, 5, 12,
        7, 16, 9, 20, 11, 24,
        13, 28, 15, 32, 17, 36});

    return MultiplicationTestHelper(workloadFactory,
                                    shape0,
                                    input0,
                                    shape1,
                                    input1,
                                    shape0,
                                    output);
}
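
// The broadcast case above repeats input1's innermost pair across every
// spatial position. A minimal sketch of that semantic (an assumed model of
// broadcasting along the last dimension only; ExampleBroadcastMultiply is a
// hypothetical helper, not part of the Arm NN API):
inline std::vector<float> ExampleBroadcastMultiply(const std::vector<float>& values0,
                                                   const std::vector<float>& values1)
{
    // values1 is tiled along the innermost dimension, i.e. element i of the
    // output pairs values0[i] with values1[i % values1.size()].
    std::vector<float> result(values0.size());
    for (size_t i = 0; i < values0.size(); ++i)
    {
        result[i] = values0[i] * values1[i % values1.size()];
    }
    return result;
}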

LayerTestResult<float,4> CompareMultiplicationTest(armnn::IWorkloadFactory& workloadFactory,
                                                   armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();
    refWorkloadFactory.Finalize();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}

LayerTestResult<float,4> CompareBatchNormTest(armnn::IWorkloadFactory& workloadFactory,
                                              armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();
    refWorkloadFactory.Finalize();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}

template<typename T>
void PermuteTensorData(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::PermutationVector& mappings,
        armnn::TensorInfo & inputTensorInfo,
        const T * inputData,
        std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // A null pointer is an error in the test. By returning without doing the
        // permutation we expect the caller to fail the test. It still makes sense
        // to report this as an assert for Debug builds.
        return;
    }

    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    inputTensorInfo = outputTensorInfo;
}
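
// Usage sketch for PermuteTensorData (illustrative only, not called by the
// test suite; the shape and mappings are assumed values): permuting a
// {2, 3, 4} tensor with mappings {1, 2, 0} moves each source dimension i to
// destination dimension mappings[i], so inputTensorInfo is updated in place
// to describe a {4, 2, 3} tensor.
inline void ExamplePermuteTensorData(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape[] = {2, 3, 4};
    armnn::TensorInfo info(3, shape, armnn::DataType::Float32);
    std::vector<float> data(info.GetNumElements(), 0.0f);
    std::vector<float> permuted;
    PermuteTensorData<float>(workloadFactory, armnn::PermutationVector({1, 2, 0}),
                             info, data.data(), permuted);
    BOOST_ASSERT(info.GetShape()[0] == 4); // info now describes the permuted layout
}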

armnn::OriginsDescriptor CreateMergerDescriptorForConcatenation(
        const std::vector<armnn::TensorInfo> & inputTensorInfos,
        unsigned int concatDim)
{
    std::vector<armnn::TensorShape> shapes;
    shapes.reserve(inputTensorInfos.size());
    for (const armnn::TensorInfo& it: inputTensorInfos)
    {
        shapes.push_back(it.GetShape());
    }

    return armnn::CreateMergerDescriptorForConcatenation(shapes.begin(),
                                                         shapes.end(),
                                                         concatDim);
}
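
// A small sketch of what the wrapper above produces (assumed example values,
// not exercised by the tests in this file): concatenating two {2, 3} shapes
// along dimension 0 should yield a descriptor whose second view starts at
// row 2 of the merged {4, 3} output.
inline void ExampleMergerDescriptorForConcatenation()
{
    const armnn::TensorShape shapes[] = { armnn::TensorShape({2, 3}), armnn::TensorShape({2, 3}) };
    armnn::OriginsDescriptor descriptor =
        armnn::CreateMergerDescriptorForConcatenation(shapes, shapes + 2, 0);
    BOOST_ASSERT(descriptor.GetNumViews() == 2);
    BOOST_ASSERT(descriptor.GetViewOrigin(1)[0] == 2); // second view's origin along dim 0
}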

//
// Concatenation is only supported for N and C dimensions for NCHW. In case of
// <4 dimensions we need to make sure that the concat dimension is at least
// the 3rd slowest iterating one.
//

bool NeedPermuteForConcat(
        const std::vector<armnn::TensorInfo> & inputTensorInfos,
        unsigned int concatDim)
{
    // See note above. Additionally we expect the input shapes to have the
    // same number of dimensions.
    unsigned int nDimensions = 0;

    // Determine the number of dimensions and sanity-check them against
    // test implementation issues.
    for (auto && tensorInfo : inputTensorInfos)
    {
        if (!nDimensions)
        {
            nDimensions = tensorInfo.GetShape().GetNumDimensions();
        }
        else
        {
            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                "Input shapes must have the same number of dimensions");
        }
    }

    return (nDimensions-concatDim) < 3;
}
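
// A worked example of the rule above (assumed shapes, illustrative only):
// for 4d NCHW inputs, concatenation along N (dim 0) or C (dim 1) is directly
// supported, while H (dim 2) and W (dim 3) require the permute workaround.
inline void ExampleNeedPermuteForConcat()
{
    const unsigned int shape[] = {1, 2, 3, 4};
    const std::vector<armnn::TensorInfo> infos(
        2, armnn::TensorInfo(4, shape, armnn::DataType::Float32));
    BOOST_ASSERT(!NeedPermuteForConcat(infos, 0)); // N: no permute needed
    BOOST_ASSERT(!NeedPermuteForConcat(infos, 1)); // C: no permute needed
    BOOST_ASSERT(NeedPermuteForConcat(infos, 2));  // H: permute needed
    BOOST_ASSERT(NeedPermuteForConcat(infos, 3));  // W: permute needed
}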

armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
{
    unsigned int numDims = inputShape.GetNumDimensions();
    if (numDims >= 3)
    {
        // Nothing to do if the inputShape has at least 3 dimensions.
        return inputShape;
    }

    std::vector<unsigned int> newDims(size_t(3), 1u);
    unsigned int expandedBy = 3 - numDims;
    for (unsigned int i=0; i<numDims; ++i)
    {
        newDims[expandedBy+i] = inputShape[i];
    }
    return armnn::TensorShape(3u, &newDims[0]);
}
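
// For reference (a minimal sketch with assumed shapes, not called by any
// test): a 1d shape {5} expands to {1, 1, 5} and a 2d shape {4, 5} expands
// to {1, 4, 5}; shapes with 3 or more dimensions are returned unchanged.
inline void ExampleExpandTensorShapeTo3dForPermute()
{
    const unsigned int dims[] = {4, 5};
    const armnn::TensorShape expanded = ExpandTensorShapeTo3dForPermute(armnn::TensorShape(2, dims));
    BOOST_ASSERT(expanded.GetNumDimensions() == 3);
    BOOST_ASSERT(expanded[0] == 1 && expanded[1] == 4 && expanded[2] == 5);
}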

void Generate3dPermuteVectorForConcat(
        unsigned int numDimensions,
        unsigned int & concatDim,
        std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
{
    BOOST_ASSERT_MSG(numDimensions <= 3,
        "Only dimensions 1,2 and 3 are supported by this helper");

    unsigned int expandedBy = 3 - numDimensions;
    unsigned int expandedConcatAxis = concatDim + expandedBy;

    if (expandedConcatAxis == 2)
    {
        concatDim = 0;
        armnn::PermutationVector forwardPermutation({1, 2, 0});
        armnn::PermutationVector reversePermutation({2, 0, 1});
        permutations = std::make_pair(forwardPermutation, reversePermutation);
    }
    else if (expandedConcatAxis == 1)
    {
        concatDim = 0;
        armnn::PermutationVector forwardPermutation({2, 0, 1});
        armnn::PermutationVector reversePermutation({1, 2, 0});
        permutations = std::make_pair(forwardPermutation, reversePermutation);
    }
    else
    {
        BOOST_ASSERT(expandedConcatAxis == 0);
        concatDim = 0;
    }
}
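
// A worked example (assumed values, illustrative only): concatenating 2d
// tensors along dimension 1 expands to a 3d concat axis of 2, so the inputs
// are rotated forward with {1, 2, 0}, concatenated along dimension 0, and
// later rotated back with the reverse permutation {2, 0, 1}.
inline void ExampleGenerate3dPermuteVectorForConcat()
{
    unsigned int concatDim = 1; // dimension 1 of a 2d tensor
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(armnn::PermutationVector({0, 1, 2}), armnn::PermutationVector({0, 1, 2}));
    Generate3dPermuteVectorForConcat(2, concatDim, permutations);
    BOOST_ASSERT(concatDim == 0); // after permuting, we always concatenate along dim 0
}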

//
// Permute the input tensors so we can do a supported concatenation.
// Also treat tensors with fewer than 3 dimensions as 3d by adding dummy
// 1-sized dimensions at the front. Finally, this function computes what the
// output shape of the permuted, concatenated tensor is going to be.
//
template <typename T>
void PermuteInputsForConcat(
        armnn::IWorkloadFactory& workloadFactory,
        std::vector<armnn::TensorInfo> & inputTensorInfos,
        std::vector<T *> & inputData,
        std::vector<std::vector<T>> & inputDataStorage,
        armnn::PermutationVector & permuteVector,
        unsigned int & concatDim,
        armnn::TensorInfo & outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);
            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}


//
// This is the counterpart of PermuteInputsForConcat(...) which permutes back
// the output of the concatenation so we can check it against an expected
// output.
//
template <typename T>
void PermuteOutputForConcat(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::TensorInfo & tensorInfo,
        const armnn::PermutationVector & permuteVector,
        std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
        T * data)
{
    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
    if (data == nullptr)
    {
        // A null pointer is an error in the test. By returning without doing the
        // permutation we expect the caller to fail the test. It still makes sense
        // to report this as an assert for Debug builds.
        return;
    }

    armnn::TensorInfo resultTensorInfo = tensorInfo;
    std::vector<T> inputData(tensorInfo.GetNumElements());
    std::vector<T> outputData;

    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());

    PermuteTensorData<T>(workloadFactory,
                         permuteVector,
                         resultTensorInfo,
                         &inputData[0],
                         outputData);

    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
}

template <typename T>
void Concatenate(armnn::IWorkloadFactory& workloadFactory,
    std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
    std::initializer_list<T *> inputsOrig,
    const armnn::TensorInfo& outputTensorInfoOrig,
    T * output,
    unsigned int concatDim)
{
    BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
    if (output == nullptr)
    {
        // A null pointer is an error in the test. By returning without doing the
        // concatenation we expect the caller to fail the test. It still makes sense
        // to report this as an assert for Debug builds.
        return;
    }

    armnn::MergerQueueDescriptor queueDescriptor;

    // Saves a copy of the parameters which we might need to change.
    std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
    std::vector<T *> inputs = inputsOrig;
    armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;

    armnn::PermutationVector permuteVector{0, 1, 2};

    // Holds and automatically releases memory for the reshaped input data.
    std::vector<std::vector<T>> tmpInputDataStorage;

    const size_t inputCount = inputTensorInfos.size();

    bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);

    if (needPermuteForConcat)
    {
        //
        // We need to permute the inputs, because concatenation along
        // the requested axis is not supported.
        //
        PermuteInputsForConcat<T>(workloadFactory,
                                  inputTensorInfos,
                                  inputs,
                                  tmpInputDataStorage,
                                  permuteVector,
                                  concatDim,
                                  outputTensorInfo);
    }

    armnn::OriginsDescriptor viewsDescriptor = CreateMergerDescriptorForConcatenation(inputTensorInfos, concatDim);

    queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
    for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
    {
        queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
            viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
    }

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
    inputHandles.reserve(inputCount);

    const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
    for (unsigned int i = 0; i < inputCount; ++i)
    {
        const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];

        std::unique_ptr<armnn::ITensorHandle> inputHandle = subTensorsSupported ?
                workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo.GetShape(),
                    queueDescriptor.m_ViewOrigins[i].m_Origin.data())
                : workloadFactory.CreateTensorHandle(inputTensorInfo);

        inputHandles.emplace_back(std::move(inputHandle));
    }

    armnn::WorkloadInfo workloadInfo;

    for (unsigned int i = 0; i < inputCount; ++i)
    {
        AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
    }

    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(queueDescriptor, workloadInfo);

    for (auto& inputHandle : inputHandles)
    {
        inputHandle->Allocate();
    }

    outputHandle->Allocate();

    unsigned int nextInputId = 0;
    for (auto& inputHandle : inputHandles)
    {
        CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
        ++nextInputId;
    }

    workloadFactory.Finalize();
    workload->Execute();

    if (needPermuteForConcat)
    {
        PermuteOutputForConcat<T>(workloadFactory,
                                  outputTensorInfo,
                                  permuteVector,
                                  std::move(outputHandle),
                                  output);
    }
    else
    {
        CopyDataFromITensorHandle(output, outputHandle.get());
    }
}

template <typename T>
LayerTestResult<T, 1> Concatenation1dTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 3 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
    auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
    auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));

    armnn::TensorInfo outputTensorInfo({ 9 }, armnn::GetDataType<T>());

    LayerTestResult<T, 1> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0);

    result.output = MakeTensor<T, 1>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
    }));

    return result;
}

LayerTestResult<float, 1> Concatenation1dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation1dTestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 2> Concatenation2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    const float qScale,
    const int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,
    }));

    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        7.0f, 8.0f, 9.0f,

        // Batch 1
        16.0f, 17.0f, 18.0f,
    }));

    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    return result;
}

template <typename T>
LayerTestResult<T, 2> Concatenation2dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory,
    float qScale, int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());

    LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 0, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}

LayerTestResult<float, 2> Concatenation2dDim0Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 2> Concatenation2dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
    float qScale, int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());

    LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
    }));

    return result;
}

LayerTestResult<float, 2> Concatenation2dDim1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 3, 3 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,

        // Batch 2
        7.0f, 8.0f, 9.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 1, 3 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        16.0f, 17.0f, 18.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}

LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 5 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f, 7.0f, 8.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        9.0f,

        // Batch 1
        18.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
    }));

    return result;
}

LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dTestImpl(armnn::IWorkloadFactory& workloadFactory,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 1, Channel 0
        25.0f, 26.0f,

        // Batch 1, Channel 1
        27.0f, 28.0f,

        // Batch 1, Channel 2
        29.0f, 30.0f
    }));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f
    }));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 0,
        qScale, qOffset);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));
    return result;
}

LayerTestResult<float, 3> Concatenation3dDim0Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
    float qScale, int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        13.0f, 14.0f,

        // Batch 0, Channel 7
        15.0f, 16.0f,

        // Batch 0, Channel 8
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 4
        27.0f, 28.0f,

        // Batch 1, Channel 5
        29.0f, 30.0f,

        // Batch 1, Channel 6
        31.0f, 32.0f,

        // Batch 1, Channel 7
        33.0f, 34.0f,

        // Batch 1, Channel 8
        35.0f, 36.0f
    }));

    return result;
}

LayerTestResult<float, 3> Concatenation3dDim1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim2TestImpl(armnn::IWorkloadFactory& workloadFactory,
    float qScale, int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 2, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
    }));

    return result;
}

LayerTestResult<float, 3> Concatenation3dDim2Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim2TestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        25.0f, 26.0f,

        // Batch 0, Channel 1
        27.0f, 28.0f,

        // Batch 0, Channel 2
        29.0f, 30.0f,

        // Batch 1, Channel 0
        13.0f, 14.0f,

        // Batch 1, Channel 1
        15.0f, 16.0f,

        // Batch 1, Channel 2
        17.0f, 18.0f,

        // Batch 2, Channel 0
        31.0f, 32.0f,

        // Batch 2, Channel 1
        33.0f, 34.0f,

        // Batch 2, Channel 2
        35.0f, 36.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}

LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 0, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 0
        27.0f, 28.0f,

        // Batch 1, Channel 1
        29.0f, 30.0f,

        // Batch 1, Channel 2
        13.0f, 14.0f,

        // Batch 1, Channel 3
        15.0f, 16.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        25.0f, 26.0f,

        // Batch 0, Channel 7
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        27.0f, 28.0f,

        // Batch 1, Channel 4
        29.0f, 30.0f,

        // Batch 1, Channel 5
        13.0f, 14.0f,

        // Batch 1, Channel 6
        15.0f, 16.0f,

        // Batch 1, Channel 7
        31.0f, 32.0f,
    }));

    return result;
}

LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f,

        // Batch 0, Channel 1
        9.0f,

        // Batch 0, Channel 2
        11.0f,

        // Batch 1, Channel 0
        25.0f,

        // Batch 1, Channel 1
        27.0f,

        // Batch 1, Channel 2
        29.0f
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f, 55.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   2);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
    }));

    return result;
}

LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        1.0f, 2.0f, 3.0f, 4.0f,
        2.0f, 3.0f, 4.0f, 5.0f,
        3.0f, 4.0f, 5.0f, 6.0f,
        4.0f, 5.0f, 6.0f, 7.0f
    }));

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        1.0f, 255.0f,
        200.0f, 250.0f,
    }));

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
    // the centre).
    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
        1.0f
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
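
// The comment in the test above can be checked numerically. A minimal sketch
// (assumed scale handling; ExampleTopLeftProjection is a hypothetical helper,
// not part of Arm NN): output texel (0, 0) projects to input (0, 0) exactly,
// so the result is input[0][0] = 1.0f rather than the 176.5f average that
// centre-projection would give.
inline float ExampleTopLeftProjection()
{
    const float input[2][2] = {{1.0f, 255.0f}, {200.0f, 250.0f}};
    const float scaleX = 2.0f / 1.0f;             // inputWidth / outputWidth
    const float scaleY = 2.0f / 1.0f;             // inputHeight / outputHeight
    const int inX = static_cast<int>(0 * scaleX); // output x = 0 projects to input x = 0
    const int inY = static_cast<int>(0 * scaleY); // output y = 0 projects to input y = 0
    return input[inY][inX];                       // 1.0f, matching the expected output
}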

LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        1.0f, 2.0f, 3.0f, 4.0f,
        2.0f, 3.0f, 4.0f, 5.0f,
        3.0f, 4.0f, 5.0f, 6.0f,
        4.0f, 5.0f, 6.0f, 7.0f
    }));

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
        1.f, 3.f,
        3.f, 5.f
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 5;
    constexpr unsigned int inputHeight = 3;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 3;
    constexpr unsigned int outputHeight = 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
        13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
        144.0f, 233.0f, 377.0f, 610.0f, 987.0f
    }));

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
        1.0f, 2.6666f, 6.0f,
        78.5f, 179.3333f, 401.f
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
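
// The expected values above follow from the top-left-corner projection with
// scaleX = 5/3 and scaleY = 3/2. A minimal sketch of one sample (assumed
// arithmetic; ExampleBilinearSample is a hypothetical helper): output element
// (1, 1) projects to input position (y, x) = (1.5, 1.6667) and interpolates
// between input rows 1 and 2 at columns 1 and 2.
inline float ExampleBilinearSample()
{
    const float x0y0 = 21.0f,  x1y0 = 34.0f;  // input row 1, columns 1 and 2
    const float x0y1 = 233.0f, x1y1 = 377.0f; // input row 2, columns 1 and 2
    const float fx = 2.0f / 3.0f;             // horizontal weight: 1.6667 - 1
    const float fy = 0.5f;                    // vertical weight: 1.5 - 1
    const float top = x0y0 + fx * (x1y0 - x0y0);
    const float bottom = x0y1 + fx * (x1y1 - x0y1);
    return top + fy * (bottom - top);         // ~179.3333f, matching the test
}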

LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 3;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 5;
    constexpr unsigned int outputHeight = 3;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                            armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                             armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        1.0f, 2.0f,
        13.0f, 21.0f,
        144.0f, 233.0f
    }));

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
        1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
        13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
        144.0f, 179.6f, 215.2f, 233.0f, 233.0f
    }));
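
    // Sketch of the upscale arithmetic: the x stride is 2/5 = 0.4, so the first row samples
    // input x = 0.0, 0.4, 0.8, 1.2, 1.6 -> 1.0, 1.4, 1.8, and the last two positions fall
    // past the final pixel, clamping to 2.0.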

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int width = 2;
    constexpr unsigned int height = 3;

    const armnn::TensorInfo tensorInfo({ height, width },
                                       armnn::DataType::Float32);
    auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
        -10.0f, -5.0f,
        0.0f, 5.0f,
        10.0f, 10.0f
    }));

    LayerTestResult<float, 2> ret(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);

    armnn::FakeQuantizationQueueDescriptor data;
    armnn::WorkloadInfo info;

    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
    float min = -10.f;
    float max = 10.f;

    data.m_Parameters.m_Min = min;
    data.m_Parameters.m_Max = max;

    armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
    armnn::FakeQuantizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
        0.0f, 63.0f,
        128.0f, 191.0f,
        255.0f, 255.0f
    }));
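    // Sketch of the mapping (assuming TensorFlow-style fake quantization over [min, max]):
    // inputs are scaled onto the 0..255 range as roughly (x - min) / (max - min) * 255 and
    // rounded, so -10 -> 0, 0 -> 128 and 10 -> 255; the exact treatment of half-way values
    // follows the reference implementation.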
    return ret;
}

LayerTestResult<float, 4> L2Normalization1dTest(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 1;
    constexpr unsigned int inputHeight = 1;
    constexpr unsigned int inputChannels = 10;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                            armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                             armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f
    }));

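    // The inverse L2 norm across the 10 channels is
    // 1 / sqrt(1^2 + 2^2 + ... + 10^2) = 1 / sqrt(385) ~= 0.050964719,
    // and every input element is scaled by it.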
    const float approxInvL2Norm = 0.050964719f;
    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
        1.0f * approxInvL2Norm,
        2.0f * approxInvL2Norm,
        3.0f * approxInvL2Norm,
        4.0f * approxInvL2Norm,
        5.0f * approxInvL2Norm,
        6.0f * approxInvL2Norm,
        7.0f * approxInvL2Norm,
        8.0f * approxInvL2Norm,
        9.0f * approxInvL2Norm,
        10.0f * approxInvL2Norm
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

namespace
{

float CalcInvL2Norm(std::initializer_list<float> elements)
{
    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
        [](float acc, float element) { return acc + element * element; });
    return 1.0f / sqrtf(reduction);
}

} // anonymous namespace
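
// For example, CalcInvL2Norm({ 3.0f, 4.0f }) returns 1.0f / sqrtf(9.0f + 16.0f) = 0.2f; the
// L2Normalization tests below scale every element by this inverse L2 norm, computed across
// the channel dimension.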

LayerTestResult<float, 4> L2Normalization2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 5;
    constexpr unsigned int inputHeight = 1;
    constexpr unsigned int inputChannels = 2;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                            armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                             armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
        2.0f, 4.0f, 6.0f, 8.0f, 10.0f
    }));

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
        1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),

        2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> L2Normalization3dTest(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 2;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                            armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                             armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        // Channel 0
        119.0f, 21.0f, 150.0f,
        149.0f, 32.0f, 179.0f,
        15.0f, 227.0f, 141.0f,
        147.0f, 199.0f, 220.0f,

        // Channel 1
        110.0f, 140.0f, 73.0f,
        211.0f, 212.0f, 89.0f,
        24.0f, 138.0f, 188.0f,
        162.0f, 12.0f, 161.0f,
    }));

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
        150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
        179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
        15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),

        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
        73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
        89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
        24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
        161.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> L2Normalization4dTest(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                            armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                             armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        // Batch 0, Channel 0
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f,
    }));

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({

        // Batch 0, Channel 0
        235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 0, Channel 1
        113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 0, Channel 2
        56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 1, Channel 0
        67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),

        // Batch 1, Channel 1
        239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),

        // Batch 1, Channel 2
        97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

template <typename T>
LayerTestResult<T, 4> ConstantTestImpl(armnn::IWorkloadFactory& workloadFactory,
    float qScale,
    int32_t qOffset)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::GetDataType<T>());

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }
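
    // A sketch of what QuantizedVector does below (assuming ArmNN's affine scheme): for a
    // quantized T each float x maps to roughly round(x / qScale) + qOffset, while for float
    // types the values pass through unchanged; ConstantTest calls this with qScale 0 /
    // qOffset 0, so the float path is exercised with the raw values.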

    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f,
    })));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;

    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);

    outputHandle->Allocate();

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ConstantTest(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> ConstantTestUint8(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<uint8_t, 3> MergerUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the merger operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; // Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; // Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);

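    // With the { channels, height, width } layout above, input1's two channels fill output
    // channels 0-1 (origin { 0, 0, 0 }) and input2's single channel fills output channel 2
    // (origin { 2, 0, 0 }), which is exactly how the expected 3-channel tensor is laid out.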

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<uint8_t, 4> AdditionUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 1;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    const float scale = 7.0f;
    const int32_t offset = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    const unsigned int shape[] = { batchSize, channels, height, width };
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);

    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

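    // Dequantized value = scale * (quantized - offset); with scale 7 and offset 3,
    // e.g. the first input element 63 dequantizes to 7 * (63 - 3) = 420.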
    // See dequantized values to the right.
    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
    {
        63, 35, 77, 70, 56, 112,  //  420, 224,  518,  469,  371, 763
        203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
    }));

    // See dequantized values to the right.
    auto input2 = MakeTensor<uint8_t, 4>(inputTensorInfo2, std::vector<uint8_t>(
    {
        21, 7, 175, 231, 175, 210, // 126,   28, 1204, 1596, 1204, 1449
        126, 161, 63, 21, 105, 126 // 861, 1106,  420,  126,  714,  861
    }));

    // See dequantized values to the right.
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>(
    {
        81, 39, 249, 255, 228, 255,  // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
        255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
    }));
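
    // The expected outputs requantize the float sums as q = round(sum / scale) + offset,
    // clamped to [0, 255]: e.g. 420 + 126 = 546 -> 546 / 7 + 3 = 81, while 469 + 1596 = 2065
    // -> 298, which saturates to 255.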

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

namespace
{
LayerTestResult<uint8_t, 4> MultiplicationUint8TestHelper(armnn::IWorkloadFactory& workloadFactory,
                                                          const unsigned int shape0[4],
                                                          const std::vector<uint8_t>& values0,
                                                          float scale0,
                                                          int32_t offset0,
                                                          const unsigned int shape1[4],
                                                          const std::vector<uint8_t>& values1,
                                                          float scale1,
                                                          int32_t offset1,
                                                          const unsigned int outShape[4],
                                                          const std::vector<uint8_t>& outValues,
                                                          float outScale,
                                                          int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1(4, shape1, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo outputTensorInfo(4, outShape, armnn::DataType::QuantisedAsymm8);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<uint8_t, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, values1);

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<uint8_t, 4> MultiplicationUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 1;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;
    const unsigned int shape[] = { batchSize, channels, height, width };

    // See dequantized values to the right.
    std::vector<uint8_t> input0({
        62, 37, 3, 172, 13, 111, // 244, 144,   8, 684,  48, 440,
        188, 20, 73, 31, 23, 31  // 748,  76, 288, 120,  88, 120
    });

    // See dequantized values to the right.
    std::vector<uint8_t> input1({
        126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
        48, 115, 151, 79, 78, 97      // 150, 351, 459, 243, 240, 297
    });

    // See dequantized values to the right.
    std::vector<uint8_t> output(
    {
        64, 72, 0, 255, 8, 236, //  93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
        77, 15, 92, 16, 10, 21, // 112200,  26676, 132192, 29160, 21120, 35640
    });
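
    // Requantization sketch: the float products map back as q = round(p / outScale) + outOffset,
    // e.g. 244 * 384 = 93696 -> 93696 / 1366.255 - 5 ~= 63.6, which rounds to 64; products that
    // fall outside [0, 255] (marked "clamped") saturate.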

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape,
                                         input0,
                                         4.0f,
                                         1,
                                         shape,
                                         input1,
                                         3.0f,
                                         -2,
                                         shape,
                                         output,
                                         1366.255f, // Scale/offset chosen to have output values out of range.
                                         -5);
}

LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<uint8_t> input0({
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12
    });

    std::vector<uint8_t> input1({2});

    std::vector<uint8_t> output({
        2, 4, 6, 8, 10, 12,
        14, 16, 18, 20, 22, 24
    });

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape0,
                                         input0,
                                         1.0f,
                                         0,
                                         shape1,
                                         input1,
                                         1.0f,
                                         0,
                                         shape0,
                                         output,
                                         1.0f,
                                         0);
}

LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 3 };

    std::vector<uint8_t> input0({
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12
    });

    std::vector<uint8_t> input1({1, 2, 3});

    std::vector<uint8_t> output({
        1, 4, 9, 4, 10, 18,
        7, 16, 27, 10, 22, 36
    });
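
    // The { 1, 1, 1, 3 } vector is broadcast along the innermost (width) dimension, so each
    // group of three is multiplied elementwise by { 1, 2, 3 }: e.g. { 4, 5, 6 } -> { 4, 10, 18 }.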

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape0,
                                         input0,
                                         1.0f,
                                         0,
                                         shape1,
                                         input1,
                                         1.0f,
                                         0,
                                         shape0,
                                         output,
                                         1.0f,
                                         0);
}

namespace
{
template <typename T>
LayerTestResult<T, 4> SubtractionTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                            const unsigned int shape0[4],
                                            const std::vector<T>& values0,
                                            float scale0,
                                            int32_t offset0,
                                            const unsigned int shape1[4],
                                            const std::vector<T>& values1,
                                            float scale1,
                                            int32_t offset1,
                                            const unsigned int outShape[4],
                                            const std::vector<T>& outValues,
                                            float outScale,
                                            int32_t outOffset)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<uint8_t, 4> SubtractionUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 2 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 1, 2, 1, 2 });
    std::vector<uint8_t> output({ 3, 3, 5, 5 });
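
    // input0 dequantizes with scale 0.5 / offset 2 to { 4, 5, 6, 7 }; subtracting { 1, 2, 1, 2 }
    // gives { 3, 3, 5, 5 }, which requantizes unchanged with an output scale of 1 and offset of 0.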

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 0.5f, 2,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 2 });
    std::vector<uint8_t> output({ 5, 6, 7, 8 });
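
    // Here the single element 2 is broadcast against input0's dequantized { 4, 5, 6, 7 },
    // giving { 2, 3, 4, 5 }; with an output offset of 3 this requantizes to { 5, 6, 7, 8 }.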

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 0.5f, 2,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 3);
}

LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 1 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 2, 1 });
    std::vector<uint8_t> output({ 8, 11, 12, 15 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 2 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 1, -1, 0, 2 });
    std::vector<float> output({ 0, 3, 3, 2 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 10 });
    std::vector<float> output({ -9, -8, -7, -6 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 2 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 10, -5 });
    std::vector<float> output({ -9, 7, -7, 9 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.1567f);
    inputTensorInfo.SetQuantizationOffset(1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.1567f);
    outputTensorInfo.SetQuantizationOffset(1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 255,
        200, 250
    }));

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
    // the centre).
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(3.141592f);
    inputTensorInfo.SetQuantizationOffset(3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(3.141592f);
    outputTensorInfo.SetQuantizationOffset(3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3,
        3, 5
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 2;
    constexpr unsigned int outputHeight = 1;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, // 3.0, 4.5, 6.0
        5, 8, 13 // 9.0, 13.5, 21.0
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3 // 3.0, 5.25
    }));
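
    // Sketch of the second expected element: the x stride is 3/2 = 1.5, so output x = 1
    // samples input x = 1.5, halfway between the dequantized 4.5 and 6.0 -> 5.25, which
    // requantizes to round(5.25 / 1.5) - 1 = 3 (scale 1.5, offset -1).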

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 3;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 5;
    constexpr unsigned int outputHeight = 3;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.010765f);
    inputTensorInfo.SetQuantizationOffset(7);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.010132f);
    outputTensorInfo.SetQuantizationOffset(-18);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        24, 228, // 0.183005, 2.379065,
        105, 128, // 1.05497, 1.302565
        230, 71 // 2.400595, 0.68896
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        0, 87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
        86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
        219, 151, 84, 50, 50 // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
4443
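// The tests that follow are thin wrappers that bind a concrete data type (and, for
// the uint8 variants, quantisation parameters) to the shared templated
// implementations in the *TestImpl.hpp headers included at the top of this file.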
LayerTestResult<float, 4> BatchNormTest(armnn::IWorkloadFactory& workloadFactory)
{
    return BatchNormTestImpl<float>(workloadFactory, 0.f, 0);
}

LayerTestResult<uint8_t, 4> BatchNormUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return BatchNormTestImpl<uint8_t>(workloadFactory, 1.f/20.f, 50);
}

LayerTestResult<uint8_t, 4> ConstantUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<uint8_t>(workloadFactory, 2e-6f, 1);
}

LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation1dTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim2TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim2DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
                                                                 bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                                        bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<uint8_t>(workloadFactory, forceNoPadding, 3.0f, -5);
}

LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory& workloadFactory,
                                                                 bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<float>(workloadFactory, forceNoPadding);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                                        bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<uint8_t>(workloadFactory, forceNoPadding, 0.1f, 128);
}

LayerTestResult<float, 4> SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleAveragePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
                                                                            bool forceNoPadding)
{
    return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
}

LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return LargeTensorsAveragePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return LargeTensorsAveragePooling2dTestCommon<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<float, 4> SimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleL2Pooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride1TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride1TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride4TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride4TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize7Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize7TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize7TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize9Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize9TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize9TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AsymmetricNonSquarePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AsymmetricNonSquarePooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> ComparePooling2dTest(armnn::IWorkloadFactory& workloadFactory,
                                               armnn::IWorkloadFactory& refWorkloadFactory,
                                               armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<float>(workloadFactory, refWorkloadFactory, poolingType);
}

LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                      armnn::IWorkloadFactory& refWorkloadFactory,
                                                      armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<uint8_t>(workloadFactory, refWorkloadFactory, poolingType, 0.1f, 128);
}

LayerTestResult<float, 2> FullyConnectedLargeTest(armnn::IWorkloadFactory& workloadFactory,
                                                  bool transposeWeights)
{
    return FullyConnectedLargeTestCommon<float>(workloadFactory, transposeWeights);
}

LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, 1.0f, -5);
}

LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<uint8_t>(workloadFactory, 1.0f, -5);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> SimplePermuteFloat32Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimplePermuteFloat32TestCommon(workloadFactory);
}

LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimplePermuteUint8TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet1TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet2TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet3TestCommon(workloadFactory);
}

namespace
{
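// Shared harness for the Mean layer tests below. It builds input/output tensors of
// the requested rank and data type, runs a Mean workload with the given axes and
// keepDims flag, and returns the actual and expected outputs for comparison.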
template <typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> MeanTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                             const unsigned int* inputShape,
                                             const std::vector<T>& inputData,
                                             const std::vector<unsigned int>& axis,
                                             bool keepDims,
                                             const unsigned int* outputShape,
                                             const std::vector<T>& outputData,
                                             float scale = 1.0f,
                                             int32_t offset = 0)
{
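    // Deduce the ArmNN data type from the template parameter: uint8_t exercises the
    // quantised path, any other type is treated as Float32.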
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

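    // The reduction axes and the keepDims flag travel in the queue descriptor's
    // parameters; the tensor shapes themselves are described by the WorkloadInfo.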
    armnn::MeanQueueDescriptor data;
    data.m_Parameters.m_Axis = axis;
    data.m_Parameters.m_KeepDims = keepDims;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 3, 2 };
    const unsigned int outputShape[] = { 1 };

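    // An empty axis list means the mean is taken over every dimension:
    // (1 + 1 + 2 + 2 + 3 + 3) / 6 = 2.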
    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2 });

    return MeanTestHelper<uint8_t, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
}

LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 2 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2, 2 });

    return MeanTestHelper<uint8_t, 4, 3>(workloadFactory, inputShape, input, {2}, false, outputShape, output);
}

LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2, 2 });

    return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, {2}, true, outputShape, output);
}

LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 1, 3, 1, 1 };

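    // Reducing over axes {0, 3} gives real-valued means of 1.5, 3.5 and 5.5 per
    // channel; the expected uint8 outputs below correspond to truncating those means.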
    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
    std::vector<uint8_t> output({ 1, 3, 5 });

    return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, {0, 3}, true, outputShape, output);
}

LayerTestResult<uint8_t, 1> MeanVtsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 2 };

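    // With scale 0.8 and offset 5, an input value q represents 0.8f * (q - 5).
    // Averaging over axes {0, 1} leaves the innermost pair of means, which
    // requantise back to { 12, 13 }.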
    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                                 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 });
    std::vector<uint8_t> output({ 12, 13 });

    return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, inputShape, input, {0, 1}, false, outputShape,
                                         output, 0.8f, 5);
}

LayerTestResult<float, 1> MeanFloatSimpleTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 3, 2 };
    const unsigned int outputShape[] = { 1 };

    std::vector<float> input({ 1., 1., 2., 2., 3., 3. });
    std::vector<float> output({ 2. });

    return MeanTestHelper<float, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
}

LayerTestResult<float, 3> MeanFloatSimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 3, 1, 2 };

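    // The two batches hold identical data, so averaging over axis 0 simply
    // reproduces one batch.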
    std::vector<float> input({ 1., 2., 3., 4., 5., 6., 1., 2., 3., 4., 5., 6. });
    std::vector<float> output({ 1., 2., 3., 4., 5., 6. });

    return MeanTestHelper<float, 4, 3>(workloadFactory, inputShape, input, {0}, false, outputShape, output);
}

LayerTestResult<float, 4> MeanFloatKeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };

    std::vector<float> input({ 1., 1., 2., 2., 3., 3. });
    std::vector<float> output({ 2., 2. });

    return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, {2}, true, outputShape, output);
}

LayerTestResult<float, 4> MeanFloatMultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 1, 3, 1, 1 };

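    // Reducing over the batch and width axes averages four values per channel,
    // e.g. (1 + 2 + 1 + 2) / 4 = 1.5 for the first channel.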
    std::vector<float> input({ 1., 2., 3., 4., 5., 6., 1., 2., 3., 4., 5., 6. });
    std::vector<float> output({ 1.5, 3.5, 5.5 });

    return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, {0, 3}, true, outputShape, output);
}

LayerTestResult<float, 1> MeanVtsFloat1Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 2 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
                               13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
    std::vector<float> output({ 12.0f, 13.0f });

    return MeanTestHelper<float, 3, 1>(workloadFactory, inputShape, input, {0, 1}, false, outputShape, output);
}

LayerTestResult<float, 3> MeanVtsFloat2Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 1, 3, 1 };

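    // keepDims retains the reduced axes {0, 2} as size-1 dimensions, hence the
    // { 1, 3, 1 } output shape.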
    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
                               13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
    std::vector<float> output({ 10.5f, 12.5f, 14.5f });

    return MeanTestHelper<float, 3, 3>(workloadFactory, inputShape, input, {0, 2}, true, outputShape, output);
}