//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "LayerTests.hpp"

#include "test/TensorHelpers.hpp"
#include "TensorCopyUtils.hpp"
#include "Permute.hpp"

#include <boost/test/unit_test.hpp>
#include <boost/assert.hpp>

#include <armnn/LayerSupport.hpp>

#include <backends/CpuTensorHandle.hpp>
#include <backends/WorkloadFactory.hpp>

#ifdef ARMCOMPUTECL_ENABLED
#include <backends/cl/ClTensorHandle.hpp>
#include <backends/aclCommon/ArmComputeTensorUtils.hpp>
#endif

#include <algorithm>
#include <cmath>       // INFINITY/NAN, used by DivisionByZeroTest.
#include <type_traits> // std::is_same, used by DivisionTestHelper.
#include <boost/cast.hpp>

#include "WorkloadTestUtils.hpp"
#include "Conv2dTestImpl.hpp"
#include "BatchNormTestImpl.hpp"
#include "ActivationTestImpl.hpp"
#include "Pooling2dTestImpl.hpp"
#include "ReshapeTestImpl.hpp"
#include "FullyConnectedTestImpl.hpp"
#include "SplitterTestImpl.hpp"
#include "SoftmaxTestImpl.hpp"
#include "NormTestImpl.hpp"
#include "PermuteTestImpl.hpp"
#include "LstmTestImpl.hpp"
#include "ConvertFp16ToFp32TestImpl.hpp"
#include "ConvertFp32ToFp16TestImpl.hpp"

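// Note on usage: each function in this file builds one workload, runs it, and returns
// a LayerTestResult holding both the actual and the expected output tensor. A backend
// test suite is expected to drive it with its own workload factory and compare the two.
// A minimal sketch, assuming the reference backend and the CompareTensors helper from
// TensorHelpers.hpp:
//
//     armnn::RefWorkloadFactory workloadFactory;
//     LayerTestResult<float, 4> result = SimpleConvolution2d3x3Test(workloadFactory, true);
//     BOOST_TEST(CompareTensors(result.output, result.outputExpected));
//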
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});

// 2-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias2({0, 2});

// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
template<typename T>
boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
{
    if(biasEnabled)
    {
        armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, armnn::GetDataType<T>());
        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
        return bias;
    }
    else
    {
        return boost::multi_array<T, 1>();
    }
}
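
// Note: for quantized element types, QuantizedVector maps each real value v to
// round(v / qScale) + qOffset (float data is passed through unchanged), so with
// the qScale = 0.5f, qOffset = 50 used by the uint8 tests below, Bias2 {0, 2}
// becomes {50, 54}.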

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       float qScale,
                                                       int32_t qOffset,
                                                       bool biasEnabled)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x4 image.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
        input,
        kernel,
        GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset);
}

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       float qScale,
                                                       int32_t qOffset,
                                                       bool biasEnabled)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
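    // Worked example for the first element of the first output channel, using the
    // top-left 3x3 window of each input channel:
    //   kernel channel 0 (all 1s, centre -1) over rows of 0.5, 0.0, 0.5 ->   3.0
    //   kernel channel 1 (all 0s)                                       ->   0.0
    //   kernel channel 2 (all 2s) over the all -1 input channel         -> -18.0
    // giving 3.0 + 0.0 - 18.0 = -15, the first value below.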
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,
            -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f, -14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
        input,
        kernel,
        GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset);
}

LayerTestResult<float, 4> SimpleConvolution2d3x5Test(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled)
{
    return SimpleConvolution2d3x5TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled)
{
    return SimpleConvolution2d3x5TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3Test(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled)
{
    return SimpleConvolution2d3x3TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled)
{
    return SimpleConvolution2d3x3TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

template<typename T>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11, 21, 31,
            12, 22, 32,
            13, 23, 33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11, -21,
            -12, -22,
        })));

    // Expected output is 1 batch of a 1-channel 6x8 image.
    // Manually calculated like this:
    // [-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0 -12*0  -22*0 ..]
    // [-11*0 -21*0  -12*0 -22*11 ; -11*0  -21*0  -12*11 -22*21 ; -11*0  -21*0  -12*21 -22*31 ; -11*0  -21*0 -12*31 -22*0 ..]
    // [-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
    // [-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
    // [-11*0 -21*13 -12*0 -22*0  ; -11*13 -21*23 -12*0  -22*0  ; -11*23 -21*33 -12*0  -22*0  ; -11*33 -21*0 -12*0  -22*0 ..]
    // [-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0 -12*0  -22*0 ..]
    // [..... ..... ..... .....   ; .....  .....  .....  .....  ; .....  .....  .....  .....  ; .....  ..... .....  ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
               0,     0,     0,    0, 0, 0,
            -242,  -594,  -934, -372, 0, 0,
            -495, -1190, -1850, -725, 0, 0,
            -538, -1256, -1916, -748, 0, 0,
            -273,  -626,  -946, -363, 0, 0,
               0,     0,     0,    0, 0, 0,
               0,     0,     0,    0, 0, 0,
               0,     0,     0,    0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
        input,
        kernel,
        GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        1,  // Padding left.
        2,  // Padding top.
        3,  // Padding right.
        4); // Padding bottom.
}
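
// For stride 1, outDim = inDim + padBefore + padAfter - kernelDim + 1, so the 3x3
// input above with a 2x2 kernel and (left, top, right, bottom) padding of (1, 2, 3, 4)
// yields width 3 + 1 + 3 - 2 + 1 = 6 and height 3 + 2 + 4 - 2 + 1 = 8, i.e. the
// 6x8 output.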

template<typename T>
LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                     float qScale,
                                                                     int32_t qOffset)
{
    // Use a single-batch 1-channel 5x5 image as input.
    armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11, 21, 31, 41, 51,
            12, 22, 32, 42, 52,
            13, 23, 33, 43, 53,
            14, 24, 34, 44, 54,
            15, 25, 35, 45, 55,
        })));

    // Use 1 batch of a 1-channel 4x4 kernel.
    armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11, -21, -31, -41,
            -12, -22, -32, -42,
            -13, -23, -33, -43,
            -14, -24, -34, -44,
        })));

    // Expected output is 1 batch of a 1-channel 5x5 image.
    armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -7140, -10580, -13940,  -9300, -5230,
            -9590, -14120, -18520, -12290, -6860,
            -9980, -14560, -18960, -12560, -7000,
            -7518, -10904, -14144,  -9318, -5152,
            -5032,  -7256,  -9376,  -6142, -3368,
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
        input,
        kernel,
        GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        1,  // Padding left.
        1,  // Padding top.
        2,  // Padding right.
        2); // Padding bottom.
}

template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                 float qScale,
                                                                 int32_t qOffset,
                                                                 bool biasEnabled)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the Python TensorFlow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dAsymmetricTestImpl<T>(workloadFactory,
        input,
        kernel,
        GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        1,  // Padding left.
        1,  // Padding top.
        2,  // Padding right.
        2,  // Padding bottom.
        1,  // strideX
        1); // strideY
}
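
// In a depthwise convolution each input channel is convolved with its own slice of
// the kernel, so the output channel count is inputChannels * depthMultiplier
// (2 * 1 = 2 above), unlike a regular Conv2d where it equals the number of kernels
// in the batch.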

LayerTestResult<float, 4>
Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleConvolution2dAsymmetricPaddingTestCommon<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<float, 4> DepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                     bool biasEnabled)
{
    return DepthwiseConvolution2dTestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(armnn::IWorkloadFactory& workloadFactory,
                                                              bool biasEnabled)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(armnn::IWorkloadFactory& workloadFactory,
                                                               bool biasEnabled)
{
    return DepthwiseConvolution2dAsymmetricTestCommon<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                            bool biasEnabled)
{
    return DepthwiseConvolution2dTestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                                     bool biasEnabled)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled);
}

LayerTestResult<float, 4> Convolution1dTest(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
{
    return Convolution1dTestImpl<float>(workloadFactory, 0.0f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> Convolution1dUint8Test(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
{
    return Convolution1dTestImpl<uint8_t>(workloadFactory, 0.1f, 128, biasEnabled);
}

LayerTestResult<float,4> CompareConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                  armnn::IWorkloadFactory& refWorkloadFactory)
{
    return CompareConvolution2dTestImpl<float>(workloadFactory, refWorkloadFactory);
}

template<typename T>
LayerTestResult<T,4> CompareDepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                       armnn::IWorkloadFactory& refWorkloadFactory)
{
    return CompareDepthwiseConvolution2dTestImpl<T>(workloadFactory, refWorkloadFactory);
}

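// CompareDepthwiseConvolution2dTest is defined in this translation unit, so the
// instantiations the test suites link against are emitted explicitly: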
template LayerTestResult<float, 4> CompareDepthwiseConvolution2dTest<float>(
    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&);
template LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dTest<uint8_t>(
    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&);

LayerTestResult<float,4> SimpleNormalizationAcrossTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float,4> SimpleNormalizationWithinTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
    return SimpleNormalizationNhwcClNeonTestImpl(workloadFactory, normChannel, normMethod);
}

LayerTestResult<float,2> SimpleSoftmaxTest(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    return SimpleSoftmaxTestImpl<float>(workloadFactory, beta);
}

LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    return SimpleSoftmaxTestImpl<uint8_t>(workloadFactory, beta);
}

LayerTestResult<float,4> CompareNormalizationTest(armnn::IWorkloadFactory& workloadFactory,
                                                  armnn::IWorkloadFactory& refWorkloadFactory,
                                                  armnn::NormalizationAlgorithmChannel normChannel,
                                                  armnn::NormalizationAlgorithmMethod normMethod)
{
    return CompareNormalizationTestImpl(workloadFactory, refWorkloadFactory, normChannel, normMethod);
}

LayerTestResult<float,2> CompareSoftmaxTest(armnn::IWorkloadFactory& workloadFactory,
                                            armnn::IWorkloadFactory& refWorkloadFactory,
                                            float beta)
{
    return CompareSoftmaxTestImpl<float>(workloadFactory, refWorkloadFactory, beta);
}

LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                   armnn::IWorkloadFactory& refWorkloadFactory,
                                                   float beta)
{
    return CompareSoftmaxTestImpl<uint8_t>(workloadFactory, refWorkloadFactory, beta);
}

std::vector<LayerTestResult<float,3>> SplitterTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SplitterTestCommon<float>(workloadFactory);
}

std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SplitterTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<float, 3> CopyViaSplitterTest(armnn::IWorkloadFactory& workloadFactory)
{
    return CopyViaSplitterTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return CopyViaSplitterTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({ 2, 2 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        { 2., 3., 3., 4. }));

    armnn::TensorInfo outputDesc({ 2, 4 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
         -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
         0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.00396806f,  0.029352f,    -0.00279226f,  0.0159977f,
         -0.00835576f, -0.0211779f,    0.0283512f,  -0.0114597f,
          0.00907307f, -0.0244004f,   -0.0152191f,  -0.0259063f,
          0.00914318f,  0.00415118f,   0.017147f,    0.0134203f,
         -0.013869f,    0.0287268f,   -0.00334693f,  0.00733398f,
         -0.0287926f,  -0.0186926f,    0.0193662f,  -0.0115437f,
          0.00422612f, -0.0345232f,    0.00223253f, -0.00957321f,
          0.0210624f,   0.013331f,     0.0150954f,   0.02168f}));
    return LstmLayerFloat32NoCifgWithPeepholeWithProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(armnn::IWorkloadFactory& workloadFactory)
{
    armnn::TensorInfo inputDesc({2, 2}, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        {2., 3., 3., 4.}));

    armnn::TensorInfo outputDesc({2, 4}, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {{-0.02973187f, 0.1229473f,  0.20885126f, -0.15358765f,
          -0.0185422f,  0.11281417f, 0.24466537f, -0.1826292f}}));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
}

LayerTestResult<float,3> MergerTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float,3> ret(outputTensorInfo);

    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
        {
             1.0f,  2.0f,  3.0f,
             4.0f,  5.0f,  6.0f,
             7.0f,  8.0f,  9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,

            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
        {
             1.0f,  2.0f,  3.0f,
             4.0f,  5.0f,  6.0f,
             7.0f,  8.0f,  9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,
        })
    );

    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
        {
            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; // Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
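
    // The two view origins place input1 at channels 0-1 and input2 at channel 2 of
    // the 3-channel output. When the backend supports sub-tensors, the input handles
    // below are created as views into the output tensor, so copying the input data
    // into them writes straight into the merged output.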

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<float,4> AdditionTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 2;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
        {
            0.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            1.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 1.0f, 2.0f,

            0.0f, 0.0f, 1.0f,
            0.2f, 1.0f, 2.0f,
        }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
        {
            1.0f, 2.0f, 1.0f,
            0.0f, 1.0f, 2.0f,

            1.0f, 2.0f, -2.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 0.0f, -3.0f,

            0.0f, 0.0f, 1.0f,
            0.7f, 1.0f, 5.0f,
        }));

    LayerTestResult<float,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 4.0f, 2.0f,
            0.2f, 2.0f, 4.0f,

            2.0f, 4.0f, -1.0f,
            0.4f, 2.0f, 4.0f,

            0.0f, 4.0f, 2.0f,
            8.4f, 1.0f, -1.0f,

            0.0f, 0.0f, 2.0f,
            0.9f, 2.0f, 7.0f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template <typename T>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                float qScale,
                                                int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
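
    // Shapes follow the usual NumPy-style broadcasting rule: size-1 dimensions are
    // stretched to match, so {1, 3, 2, 1} + {1, 1, 2, 3} -> {1, 3, 2, 3}. E.g.
    // output[0][1][0][2] = input1[0][1][0][0] + input2[0][0][0][2] = 2.0 + 2.5 = 4.5.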

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template <typename T>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                        float qScale,
                                                        int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
             0.0f,  1.0f,  2.0f,
             3.0f,  4.0f,  5.0f,
             6.0f,  7.0f,  8.0f,
             9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
             0.5f,  1.5f,  2.5f,
             3.5f,  4.5f,  5.5f,
             6.5f,  7.5f,  8.5f,
             9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<float, 4> AdditionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcastTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcastTestImpl<uint8_t>(workloadFactory, 2.f, 0);
}

LayerTestResult<float, 4> AdditionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcast1ElementTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AdditionBroadcast1ElementTestImpl<uint8_t>(workloadFactory, 0.1333333f, 128);
}

LayerTestResult<float,4> CompareAdditionTest(armnn::IWorkloadFactory& workloadFactory,
                                             armnn::IWorkloadFactory& refWorkloadFactory)
{
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();
    refWorkloadFactory.Finalize();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}

namespace {
template <typename T>
LayerTestResult<T, 4> DivisionTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                         const unsigned int shape0[4],
                                         const std::vector<T>& values0,
                                         float scale0,
                                         int32_t offset0,
                                         const unsigned int shape1[4],
                                         const std::vector<T>& values1,
                                         float scale1,
                                         int32_t offset1,
                                         const unsigned int outShape[4],
                                         const std::vector<T>& outValues,
                                         float outScale,
                                         int32_t outOffset)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DivisionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<float,4> DivisionByZeroTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<float> input0({
         1.f,  1.f,  1.f,  1.f, 0.f, 0.f, 0.f, 0.f,
        -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });

    std::vector<float> input1({
        0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
        0.f, 0.f, -0.f, -0.f, 5.f, 5.f,  5.f,  5.f });

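    // IEEE-754 float division fixes the expected values: a non-zero numerator over a
    // (signed) zero gives a correspondingly signed infinity, and 0/0 gives NaN.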
    std::vector<float> output({
         INFINITY,  INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
        -INFINITY, -INFINITY,  INFINITY,  INFINITY,   1,   1,    1,    1 });

    return DivisionTestHelper<float>(workloadFactory,
                                     shape, input0, 1.0f, 0,
                                     shape, input1, 1.0f, 0,
                                     shape, output, 1.0f, 0);
}

LayerTestResult<float,4> DivisionTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<float> input0({
        2, 2, 2, 2, 3, 3, 3, 3,
        4, 4, 4, 4, 5, 5, 5, 5 });

    std::vector<float> input1({
        1, 1, 1, 1, 2, 2, 2, 2,
        4, 4, 4, 4, 4, 4, 4, 4 });

    std::vector<float> output({
        2, 2, 2, 2, 1.5,  1.5,  1.5,  1.5,
        1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });

    return DivisionTestHelper<float>(workloadFactory,
                                     shape, input0, 1.0f, 0,
                                     shape, input1, 1.0f, 0,
                                     shape, output, 1.0f, 0);
}

LayerTestResult<float, 4> DivisionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 2, 2, 2 };
    std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});

    unsigned int shape1[] = { 1, 1, 1, 1 };
    std::vector<float> input1({ 2 });

    std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});

    return DivisionTestHelper<float>(workloadFactory,
                                     shape0, input0, 1.0f, 0,
                                     shape1, input1, 1.0f, 0,
                                     shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 3, 3, 2 };
    std::vector<float> input0({
         1,  4,  3,  8,  5, 12,
         7, 16,  9, 20, 11, 24,
        13, 28, 15, 32, 17, 36});

    unsigned int shape1[] = { 1, 1, 1, 2 };
    std::vector<float> input1({ 1, 2 });

    std::vector<float> output({
         1,  2,  3,  4,  5,  6,
         7,  8,  9, 10, 11, 12,
        13, 14, 15, 16, 17, 18});

    return DivisionTestHelper<float>(workloadFactory,
                                     shape0, input0, 1.0f, 0,
                                     shape1, input1, 1.0f, 0,
                                     shape0, output, 1.0f, 0);
}

LayerTestResult<uint8_t,4> DivisionUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
                                 4, 4, 4, 4, 5, 5, 5, 5 });

    std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
                                 4, 4, 4, 4, 4, 4, 4, 4 });

    std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
                                 4, 4, 4, 4, 5, 5, 5, 5});
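
    // With an output scale of 0.25 the real quotients are re-quantised as
    // round(value / 0.25): 2.0 -> 8, 1.5 -> 6, 1.0 -> 4 and 1.25 -> 5, giving the
    // vector above.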
1258
1259
1260 return DivisionTestHelper<uint8_t>(workloadFactory,
1261 shape, input0, 1.0f, 0,
1262 shape, input1, 1.0f, 0,
1263 shape, output, 0.25f, 0);
1264}
1265
1266LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
1267{
1268 unsigned int shape0[] = { 1, 2, 2, 2 };
1269 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1270
1271 unsigned int shape1[] = { 1, 1, 1, 1 };
1272 std::vector<uint8_t> input1({ 2 });
1273
1274 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1275
1276 return DivisionTestHelper<uint8_t>(workloadFactory,
1277 shape0, input0, 1.0f, 0,
1278 shape1, input1, 1.0f, 0,
1279 shape0, output, 1.0f, 0);
1280}
1281
1282LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
1283{
1284 unsigned int shape0[] = { 1, 3, 3, 2 };
1285 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
1286 7, 16, 9, 20, 11, 24,
1287 13, 28, 15, 32, 17, 36});
1288
1289 unsigned int shape1[] = { 1, 1, 1, 2 };
1290 std::vector<uint8_t> input1({ 1, 2 });
1291
    std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
                                 7, 8, 9, 10, 11, 12,
                                 13, 14, 15, 16, 17, 18});

    return DivisionTestHelper<uint8_t>(workloadFactory,
                                       shape0, input0, 1.0f, 0,
                                       shape1, input1, 1.0f, 0,
                                       shape0, output, 1.0f, 0);
}

namespace {
LayerTestResult<float,4> MultiplicationTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                                  const unsigned int shape0[4],
                                                  const std::vector<float> & values0,
                                                  const unsigned int shape1[4],
                                                  const std::vector<float> & values1,
                                                  const unsigned int outShape[4],
                                                  const std::vector<float> & outValues)
{
    const size_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};

    auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
    return ret;
}
} // anonymous namespace


LayerTestResult<float,4> MultiplicationTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<float> input0({
        1, 1, 1, 1, 2, 2, 2, 2,
        3, 3, 3, 3, 4, 4, 4, 4 });

    std::vector<float> input1({
        2, 2, 2, 2, 3, 3, 3, 3,
        4, 4, 4, 4, 5, 5, 5, 5 });

    std::vector<float> output({
        2, 2, 2, 2, 6, 6, 6, 6,
        12, 12, 12, 12, 20, 20, 20, 20 });

    return MultiplicationTestHelper(workloadFactory,
                                    shape,
                                    input0,
                                    shape,
                                    input1,
                                    shape,
                                    output);
}

LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 2, 2, 2 };
    std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});

    unsigned int shape1[] = { 1, 1, 1, 1 };
    std::vector<float> input1({ 2 });

    std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});

    return MultiplicationTestHelper(workloadFactory,
                                    shape0,
                                    input0,
                                    shape1,
                                    input1,
                                    shape0,
                                    output);
}

LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int shape0[] = { 1, 3, 3, 2 };
    std::vector<float> input0({
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12,
        13, 14, 15, 16, 17, 18});

    unsigned int shape1[] = { 1, 1, 1, 2 };
    std::vector<float> input1({ 1, 2 });

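    // The broadcast { 1, 2 } vector multiplies every second element of input0
    // by two, which is visible in the alternating pattern of the output.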
    std::vector<float> output({
        1, 4, 3, 8, 5, 12,
        7, 16, 9, 20, 11, 24,
        13, 28, 15, 32, 17, 36});

    return MultiplicationTestHelper(workloadFactory,
                                    shape0,
                                    input0,
                                    shape1,
                                    input1,
                                    shape0,
                                    output);
}

LayerTestResult<float,4> CompareMultiplicationTest(armnn::IWorkloadFactory& workloadFactory,
                                                   armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();
    refWorkloadFactory.Finalize();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}

LayerTestResult<float,4> CompareBatchNormTest(armnn::IWorkloadFactory& workloadFactory,
                                              armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();
    refWorkloadFactory.Finalize();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}

template<typename T>
void PermuteTensorData(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::PermutationVector& mappings,
        armnn::TensorInfo & inputTensorInfo,
        const T * inputData,
        std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
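    // Hand the permuted TensorInfo back to the caller through the in/out
    // parameter, so follow-up code sees the post-permute shape.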
    inputTensorInfo = outputTensorInfo;
}

armnn::OriginsDescriptor CreateMergerDescriptorForConcatenation(
        const std::vector<armnn::TensorInfo> & inputTensorInfos,
        unsigned int concatDim)
{
    std::vector<armnn::TensorShape> shapes;
    shapes.reserve(inputTensorInfos.size());
    for (const armnn::TensorInfo& it: inputTensorInfos)
    {
        shapes.push_back(it.GetShape());
    }

    return armnn::CreateMergerDescriptorForConcatenation(shapes.begin(),
                                                         shapes.end(),
                                                         concatDim);
}

//
// Concatenation is only supported for N and C dimensions for NCHW. In case of
// <4 dimensions we need to make sure that the concat dimension is at least
// the third slowest iterating one.
//

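// For example, 4-D NCHW inputs concatenated along N (dim 0) or C (dim 1) need
// no permute, since 4 - 0 and 4 - 1 are both >= 3, whereas a concat along
// H (dim 2) or W (dim 3) does need one.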
bool NeedPermuteForConcat(
        const std::vector<armnn::TensorInfo> & inputTensorInfos,
        unsigned int concatDim)
{
    // See note above. Additionally we expect the input shapes to have the
    // same number of dimensions.
    unsigned int nDimensions = 0;

    // Determine the number of dimensions as well as sanity-check them
    // against test implementation issues.
    for (auto && tensorInfo : inputTensorInfos)
    {
        if (!nDimensions)
        {
            nDimensions = tensorInfo.GetShape().GetNumDimensions();
        }
        else
        {
            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                "Input shapes must have the same number of dimensions");
        }
    }

    return (nDimensions-concatDim) < 3;
}

armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
{
    unsigned int numDims = inputShape.GetNumDimensions();
    if (numDims >= 3)
    {
        // Nothing to do if the inputShape has at least 3 dimensions.
        return inputShape;
    }

    std::vector<unsigned int> newDims(size_t(3), 1u);
    unsigned int expandedBy = 3 - numDims;
    for (unsigned int i=0; i<numDims; ++i)
    {
        newDims[expandedBy+i] = inputShape[i];
    }
    return armnn::TensorShape(3u, &newDims[0]);
}
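// E.g. a 1-D shape { 5 } becomes { 1, 1, 5 } and a 2-D shape { 2, 3 } becomes
// { 1, 2, 3 }: dummy leading dimensions are added while the original ones
// keep their relative order.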

void Generate3dPermuteVectorForConcat(
        unsigned int numDimensions,
        unsigned int & concatDim,
        std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
{
    BOOST_ASSERT_MSG(numDimensions <= 3,
        "Only dimensions 1,2 and 3 are supported by this helper");

    unsigned int expandedBy = 3 - numDimensions;
    unsigned int expandedConcatAxis = concatDim + expandedBy;

    if (expandedConcatAxis == 2)
    {
        concatDim = 0;
        armnn::PermutationVector forwardPermutation({1, 2, 0});
        armnn::PermutationVector reversePermutation({2, 0, 1});
        permutations = std::make_pair(forwardPermutation, reversePermutation);
    }
    else if (expandedConcatAxis == 1)
    {
        concatDim = 0;
        armnn::PermutationVector forwardPermutation({2, 0, 1});
        armnn::PermutationVector reversePermutation({1, 2, 0});
        permutations = std::make_pair(forwardPermutation, reversePermutation);
    }
    else
    {
        BOOST_ASSERT(expandedConcatAxis == 0);
        concatDim = 0;
    }
}
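// E.g. for a concat along the fastest iterating dimension (expandedConcatAxis
// == 2), the forward mapping {1, 2, 0} sends source dimension 2 to destination
// dimension 0, which is why concatDim is rewritten to 0 above; the reverse
// mapping undoes this once the merger has run.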

//
// Permutes the input tensors so we can do a supported concatenation.
// It also treats tensors with fewer than three dimensions as 3-D by adding
// dummy 1 dimensions at the front. Finally, it reports the output shape
// of the permuted, concatenated tensor.
//
template <typename T>
void PermuteInputsForConcat(
        armnn::IWorkloadFactory& workloadFactory,
        std::vector<armnn::TensorInfo> & inputTensorInfos,
        std::vector<T *> & inputData,
        std::vector<std::vector<T>> & inputDataStorage,
        armnn::PermutationVector & permuteVector,
        unsigned int & concatDim,
        armnn::TensorInfo & outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);
            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}


//
// This is the counterpart of PermuteInputsForConcat(...): it permutes back
// the output of the concatenation so we can check it against an expected
// output.
//
template <typename T>
void PermuteOutputForConcat(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::TensorInfo & tensorInfo,
        const armnn::PermutationVector & permuteVector,
        std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
        T * data)
{
    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
    if (data == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo resultTensorInfo = tensorInfo;
    std::vector<T> inputData(tensorInfo.GetNumElements());
    std::vector<T> outputData;

    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());

    PermuteTensorData<T>(workloadFactory,
                         permuteVector,
                         resultTensorInfo,
                         &inputData[0],
                         outputData);

    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
}

template <typename T>
void Concatenate(armnn::IWorkloadFactory& workloadFactory,
                 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
                 std::initializer_list<T *> inputsOrig,
                 const armnn::TensorInfo& outputTensorInfoOrig,
                 T * output,
                 unsigned int concatDim)
{
    BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
    if (output == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::MergerQueueDescriptor queueDescriptor;

    // Saves copies of the parameters which we might need to change.
    std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
    std::vector<T *> inputs = inputsOrig;
    armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;

    armnn::PermutationVector permuteVector{0, 1, 2};

    // Holds and automatically releases memory for the reshaped input data.
    std::vector<std::vector<T>> tmpInputDataStorage;

    const size_t inputCount = inputTensorInfos.size();

    bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);

    if (needPermuteForConcat)
    {
        //
        // We need to permute the inputs, because concatenation along
        // the requested axis is not supported.
        //
        PermuteInputsForConcat<T>(workloadFactory,
                                  inputTensorInfos,
                                  inputs,
                                  tmpInputDataStorage,
                                  permuteVector,
                                  concatDim,
                                  outputTensorInfo);
    }

    armnn::OriginsDescriptor viewsDescriptor = CreateMergerDescriptorForConcatenation(inputTensorInfos, concatDim);

    queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
    for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
    {
        queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
            viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
    }

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
    inputHandles.reserve(inputCount);

    const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
    for (unsigned int i = 0; i < inputCount; ++i)
    {
        const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];

        std::unique_ptr<armnn::ITensorHandle> inputHandle = subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo.GetShape(),
                queueDescriptor.m_ViewOrigins[i].m_Origin.data())
            : workloadFactory.CreateTensorHandle(inputTensorInfo);

        inputHandles.emplace_back(std::move(inputHandle));
    }

    armnn::WorkloadInfo workloadInfo;

    for (unsigned int i = 0; i < inputCount; ++i)
    {
        AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
    }

    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(queueDescriptor, workloadInfo);

    for (auto& inputHandle : inputHandles)
    {
        inputHandle->Allocate();
    }

    outputHandle->Allocate();

    unsigned int nextInputId = 0;
    for (auto& inputHandle : inputHandles)
    {
        CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
        ++nextInputId;
    }

    workloadFactory.Finalize();
    workload->Execute();

    if (needPermuteForConcat)
    {
        PermuteOutputForConcat<T>(workloadFactory,
                                  outputTensorInfo,
                                  permuteVector,
                                  std::move(outputHandle),
                                  output);
    }
    else
    {
        CopyDataFromITensorHandle(output, outputHandle.get());
    }
}

template <typename T>
LayerTestResult<T, 1> Concatenation1dTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 3 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
    auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
    auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));

    armnn::TensorInfo outputTensorInfo({ 9 }, armnn::GetDataType<T>());

    LayerTestResult<T, 1> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { inputTensorInfo, inputTensorInfo, inputTensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        0);

    result.output = MakeTensor<T, 1>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
    }));

    return result;
}

LayerTestResult<float, 1> Concatenation1dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation1dTestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 2> Concatenation2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                              const armnn::TensorInfo& outputTensorInfo,
                                              unsigned int dimension,
                                              const float qScale,
                                              const int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,
    }));

    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        7.0f, 8.0f, 9.0f,

        // Batch 1
        16.0f, 17.0f, 18.0f,
    }));

    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { inputTensorInfo, inputTensorInfo, inputTensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        dimension);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    return result;
}

template <typename T>
LayerTestResult<T, 2> Concatenation2dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                  float qScale, int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());

    LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 0, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}

LayerTestResult<float, 2> Concatenation2dDim0Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 2> Concatenation2dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                  float qScale, int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());

    LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
    }));

    return result;
}

LayerTestResult<float, 2> Concatenation2dDim1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
                                                               int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 3, 3 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,

        // Batch 2
        7.0f, 8.0f, 9.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 1, 3 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        16.0f, 17.0f, 18.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { input0TensorInfo, input1TensorInfo, input2TensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        0);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}

LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
                                                               int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 5 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f, 7.0f, 8.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        9.0f,

        // Batch 1
        18.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { input0TensorInfo, input1TensorInfo, input2TensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        1);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
    }));

    return result;
}

LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                              const armnn::TensorInfo& outputTensorInfo,
                                              unsigned int dimension,
                                              float qScale,
                                              int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 1, Channel 0
        25.0f, 26.0f,

        // Batch 1, Channel 1
        27.0f, 28.0f,

        // Batch 1, Channel 2
        29.0f, 30.0f
    }));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f
    }));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { inputTensorInfo, inputTensorInfo, inputTensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        dimension);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
                                                  int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 0,
                                                              qScale, qOffset);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));
    return result;
}

LayerTestResult<float, 3> Concatenation3dDim0Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                  float qScale, int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        13.0f, 14.0f,

        // Batch 0, Channel 7
        15.0f, 16.0f,

        // Batch 0, Channel 8
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 4
        27.0f, 28.0f,

        // Batch 1, Channel 5
        29.0f, 30.0f,

        // Batch 1, Channel 6
        31.0f, 32.0f,

        // Batch 1, Channel 7
        33.0f, 34.0f,

        // Batch 1, Channel 8
        35.0f, 36.0f
    }));

    return result;
}

LayerTestResult<float, 3> Concatenation3dDim1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim2TestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                  float qScale, int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 2, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
    }));

    return result;
}

LayerTestResult<float, 3> Concatenation3dDim2Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim2TestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
                                                               int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        25.0f, 26.0f,

        // Batch 0, Channel 1
        27.0f, 28.0f,

        // Batch 0, Channel 2
        29.0f, 30.0f,

        // Batch 1, Channel 0
        13.0f, 14.0f,

        // Batch 1, Channel 1
        15.0f, 16.0f,

        // Batch 1, Channel 2
        17.0f, 18.0f,

        // Batch 2, Channel 0
        31.0f, 32.0f,

        // Batch 2, Channel 1
        33.0f, 34.0f,

        // Batch 2, Channel 2
        35.0f, 36.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { input0TensorInfo, input1TensorInfo, input2TensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        0);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}

LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
                                                               int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 0, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 0
        27.0f, 28.0f,

        // Batch 1, Channel 1
        29.0f, 30.0f,

        // Batch 1, Channel 2
        13.0f, 14.0f,

        // Batch 1, Channel 3
        15.0f, 16.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { input0TensorInfo, input1TensorInfo, input2TensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        1);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        25.0f, 26.0f,

        // Batch 0, Channel 7
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        27.0f, 28.0f,

        // Batch 1, Channel 4
        29.0f, 30.0f,

        // Batch 1, Channel 5
        13.0f, 14.0f,

        // Batch 1, Channel 6
        15.0f, 16.0f,

        // Batch 1, Channel 7
        31.0f, 32.0f,
    }));

    return result;
}

LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
}

template <typename T>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
                                                               int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f,

        // Batch 0, Channel 1
        9.0f,

        // Batch 0, Channel 2
        11.0f,

        // Batch 1, Channel 0
        25.0f,

        // Batch 1, Channel 1
        27.0f,

        // Batch 1, Channel 2
        29.0f
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f, 55.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
        { input0TensorInfo, input1TensorInfo, input2TensorInfo },
        { input0.data(), input1.data(), input2.data() },
        outputTensorInfo,
        output.data(),
        2);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
    }));

    return result;
}

LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                            armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                             armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        1.0f, 2.0f, 3.0f, 4.0f,
        2.0f, 3.0f, 4.0f, 5.0f,
        3.0f, 4.0f, 5.0f, 6.0f,
        4.0f, 5.0f, 6.0f, 7.0f
    }));

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                            armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                             armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        1.0f, 255.0f,
        200.0f, 250.f,
    }));

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
    // the centre).
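    // Concretely: the scale factor here is inputSize / outputSize = 2 in both
    // dimensions, so output texel (0,0) projects exactly onto input (0,0) and
    // the single output element is input[0][0] = 1.0f, with no interpolation.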
    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
        1.0f
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                            armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                             armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        1.0f, 2.0f, 3.0f, 4.0f,
        2.0f, 3.0f, 4.0f, 5.0f,
        3.0f, 4.0f, 5.0f, 6.0f,
        4.0f, 5.0f, 6.0f, 7.0f
    }));

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
        1.f, 3.f,
        3.f, 5.f
    }));
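    // With a scale of 2 in both dimensions the output texels project exactly
    // onto every other input texel, so the expected values are simply
    // input[0][0], input[0][2], input[2][0] and input[2][2].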

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 5;
    constexpr unsigned int inputHeight = 3;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 3;
    constexpr unsigned int outputHeight = 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                            armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                             armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
        13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
        144.0f, 233.0f, 377.0f, 610.0f, 987.0f
    }));

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
        1.0f, 2.6666f, 6.0f,
        78.5f, 179.3333f, 401.f
    }));
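    // With scales of inputHeight / outputHeight = 1.5 and inputWidth /
    // outputWidth = 5/3: e.g. output (0,1) projects to input x = 5/3, giving
    // 2 + (2/3) * (3 - 2) = 2.6666f, and output (1,0) projects to input
    // y = 1.5, giving 13 + 0.5 * (144 - 13) = 78.5f.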
3034
3035 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3036 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3037
3038 armnn::ResizeBilinearQueueDescriptor descriptor;
3039 armnn::WorkloadInfo info;
3040 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
3041 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
3042
3043 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
3044
3045 inputHandle->Allocate();
3046 outputHandle->Allocate();
3047 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3048
surmeh013537c2c2018-05-18 16:31:43 +01003049 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00003050 workload->Execute();
3051
3052 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3053 return result;
3054}
3055
3056LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory)
3057{
3058 constexpr unsigned int inputWidth = 2;
3059 constexpr unsigned int inputHeight = 3;
3060 constexpr unsigned int inputChannels = 1;
3061 constexpr unsigned int inputBatchSize = 1;
3062
3063 constexpr unsigned int outputWidth = 5;
3064 constexpr unsigned int outputHeight = 3;
3065 constexpr unsigned int outputChannels = inputChannels;
3066 constexpr unsigned int outputBatchSize = inputBatchSize;
3067
3068 const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
3069 armnn::DataType::Float32);
3070 const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
3071 armnn::DataType::Float32);
3072
3073 auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
3074 1.0f, 2.0f,
3075 13.0f, 21.0f,
3076 144.0f, 233.0f
3077 }));
3078
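    // Magnification by 2.5x horizontally: output x maps to input x = xOut * 2/5, i.e. 0.0, 0.4, 0.8, 1.2, 1.6.
    // The first columns interpolate between the two input columns (e.g. 1.0f + 0.4f * (2.0f - 1.0f) = 1.4f);
    // x = 1.2 and 1.6 project past the last input column, so they clamp to its value.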
    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
          1.0f,   1.4f,   1.8f,   2.f,   2.f,
         13.f,   16.2f,  19.4f,  21.f,  21.f,
        144.f,  179.6f, 215.2f, 233.f, 233.f
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int width = 2;
    constexpr unsigned int height = 3;

    const armnn::TensorInfo tensorInfo({ height, width }, armnn::DataType::Float32);

    auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
        -10.0f, -5.0f,
          0.0f,  5.0f,
         10.0f, 10.0f
    }));

    LayerTestResult<float, 2> ret(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);

    armnn::FakeQuantizationQueueDescriptor data;
    armnn::WorkloadInfo info;

    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());

    float min = -10.f;
    float max = 10.f;

    data.m_Parameters.m_Min = min;
    data.m_Parameters.m_Max = max;

    armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
    armnn::FakeQuantizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

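    // Fake quantization maps [min, max] = [-10, 10] linearly onto [0, 255] and clamps:
    // -10.0f -> 0, -5.0f -> ~63.75 (stored as 63 here), 0.0f -> ~127.5, 5.0f -> ~191.25, 10.0f -> 255.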
    ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
          0.0f,  63.0f,
        128.0f, 191.0f,
        255.0f, 255.0f
    }));
    return ret;
}

LayerTestResult<float, 4> L2Normalization1dTest(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 1;
    constexpr unsigned int inputHeight = 1;
    constexpr unsigned int inputChannels = 10;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f
    }));

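    // 1 / sqrt(1^2 + 2^2 + ... + 10^2) = 1 / sqrt(385).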
    const float approxInvL2Norm = 0.050964719f;
    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
         1.0f * approxInvL2Norm,
         2.0f * approxInvL2Norm,
         3.0f * approxInvL2Norm,
         4.0f * approxInvL2Norm,
         5.0f * approxInvL2Norm,
         6.0f * approxInvL2Norm,
         7.0f * approxInvL2Norm,
         8.0f * approxInvL2Norm,
         9.0f * approxInvL2Norm,
        10.0f * approxInvL2Norm
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

namespace
{

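// Returns 1 / sqrt(sum of squares) of the given values; used to build the expected L2 normalization outputs.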
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
        [](float acc, float element) { return acc + element * element; });
    return 1.0f / sqrtf(reduction);
}

} // anonymous namespace

LayerTestResult<float, 4> L2Normalization2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 5;
    constexpr unsigned int inputHeight = 1;
    constexpr unsigned int inputChannels = 2;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
        2.0f, 4.0f, 6.0f, 8.0f, 10.0f
    }));

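    // L2 normalization acts across the channel dimension: each element is divided by the L2 norm of the
    // values at the same (height, width) position in every channel, i.e. the pairs (1, 2), (3, 4), ... here.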
    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
         1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
         3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
         5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
         7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
         9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),

         2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
         4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
         6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
         8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> L2Normalization3dTest(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 2;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        // Channel 0
        119.0f,  21.0f, 150.0f,
        149.0f,  32.0f, 179.0f,
         15.0f, 227.0f, 141.0f,
        147.0f, 199.0f, 220.0f,

        // Channel 1
        110.0f, 140.0f,  73.0f,
        211.0f, 212.0f,  89.0f,
         24.0f, 138.0f, 188.0f,
        162.0f,  12.0f, 161.0f,
    }));

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
         21.0f * CalcInvL2Norm({  21.0f, 140.0f }),
        150.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
         32.0f * CalcInvL2Norm({  32.0f, 212.0f }),
        179.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
         15.0f * CalcInvL2Norm({  15.0f,  24.0f }),
        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        199.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),

        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        140.0f * CalcInvL2Norm({  21.0f, 140.0f }),
         73.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        212.0f * CalcInvL2Norm({  32.0f, 212.0f }),
         89.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
         24.0f * CalcInvL2Norm({  15.0f,  24.0f }),
        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
         12.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
        161.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> L2Normalization4dTest(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::Float32);

    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
        // Batch 0, Channel 0
        235.0f,  46.0f, 178.0f,
        100.0f, 123.0f,  19.0f,
        172.0f,  74.0f, 250.0f,
          6.0f, 195.0f,  80.0f,

        // Batch 0, Channel 1
        113.0f,  95.0f, 202.0f,
         77.0f, 114.0f,  71.0f,
        122.0f, 246.0f, 166.0f,
         82.0f,  28.0f,  37.0f,

        // Batch 0, Channel 2
         56.0f, 170.0f, 162.0f,
        194.0f,  89.0f, 254.0f,
         12.0f, 209.0f, 200.0f,
          1.0f,  64.0f,  54.0f,

        // Batch 1, Channel 0
         67.0f,  90.0f,  49.0f,
          7.0f, 163.0f,  18.0f,
         25.0f, 117.0f, 103.0f,
        247.0f,  59.0f, 189.0f,

        // Batch 1, Channel 1
        239.0f, 104.0f, 199.0f,
         17.0f, 124.0f, 153.0f,
        222.0f, 217.0f,  75.0f,
         32.0f, 126.0f,  21.0f,

        // Batch 1, Channel 2
         97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f,  16.0f, 132.0f,
         92.0f, 125.0f,  88.0f,
    }));

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
        // Batch 0, Channel 0
        235.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
         46.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        100.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
        123.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
         19.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
        172.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
         74.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
          6.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
        195.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         80.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 0, Channel 1
        113.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
         95.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
         77.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
        114.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
         71.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
        122.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
        246.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
         82.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
         28.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         37.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 0, Channel 2
         56.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
        170.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
        194.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
         89.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
        254.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
         12.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
        209.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
          1.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
         64.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         54.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 1, Channel 0
         67.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
         90.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
         49.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
          7.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
         18.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
         25.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
        117.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
        103.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
        247.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
         59.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
        189.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),

        // Batch 1, Channel 1
        239.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
        104.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
        199.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
         17.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        153.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
        222.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
        217.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
         75.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
         32.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
        126.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
         21.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),

        // Batch 1, Channel 2
         97.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
        145.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
        215.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
        115.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        238.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
        226.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
         16.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
        132.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
         92.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
        125.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
         88.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

template <typename T>
LayerTestResult<T, 4> ConstantTestImpl(armnn::IWorkloadFactory& workloadFactory,
    float qScale,
    int32_t qOffset)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        235.0f,  46.0f, 178.0f,
        100.0f, 123.0f,  19.0f,
        172.0f,  74.0f, 250.0f,
          6.0f, 195.0f,  80.0f,

        // Batch 0, Channel 1
        113.0f,  95.0f, 202.0f,
         77.0f, 114.0f,  71.0f,
        122.0f, 246.0f, 166.0f,
         82.0f,  28.0f,  37.0f,

        // Batch 0, Channel 2
         56.0f, 170.0f, 162.0f,
        194.0f,  89.0f, 254.0f,
         12.0f, 209.0f, 200.0f,
          1.0f,  64.0f,  54.0f,

        // Batch 1, Channel 0
         67.0f,  90.0f,  49.0f,
          7.0f, 163.0f,  18.0f,
         25.0f, 117.0f, 103.0f,
        247.0f,  59.0f, 189.0f,

        // Batch 1, Channel 1
        239.0f, 104.0f, 199.0f,
         17.0f, 124.0f, 153.0f,
        222.0f, 217.0f,  75.0f,
         32.0f, 126.0f,  21.0f,

        // Batch 1, Channel 2
         97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f,  16.0f, 132.0f,
         92.0f, 125.0f,  88.0f,
    })));

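    // The Constant workload simply materializes the tensor held in m_LayerOutput into its output,
    // so the expected output is the input data itself.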
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;

    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);

    outputHandle->Allocate();

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<float, 4> ConstantTest(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<float>(workloadFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> ConstantTestUint8(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
}

LayerTestResult<uint8_t, 3> MergerUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the merger operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
         1,  2,  3,
         4,  5,  6,
         7,  8,  9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
         1,  2,  3,
         4,  5,  6,
         7,  8,  9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    }));

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

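    // The merge is along the channel axis: input1 fills channels 0-1 of the output (origin { 0, 0, 0 }),
    // while input2 fills channel 2 (origin { 2, 0, 0 }).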
    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; // Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; // Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}

LayerTestResult<uint8_t, 4> AdditionUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 1;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    const float scale = 7.0f;
    const int32_t offset = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    const unsigned int shape[] = { batchSize, channels, height, width };
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);

    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

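    // QuantisedAsymm8 values dequantize as real = scale * (quantized - offset), e.g. 7.0f * (63 - 3) = 420.
    // The addition happens on the real values and the sum re-quantizes as quantized = real / scale + offset,
    // clamped to [0, 255] - which is why several of the expected outputs below saturate at 255.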
    // See dequantized values to the right.
    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
    {
         63,  35,  77,  70,  56, 112, //  420, 224,  518,  469,  371, 763
        203,  28, 252, 168, 245,  91  // 1400, 175, 1743, 1155, 1694, 616
    }));

    // See dequantized values to the right.
    auto input2 = MakeTensor<uint8_t, 4>(inputTensorInfo2, std::vector<uint8_t>(
    {
         21,   7, 175, 231, 175, 210, // 126,   28, 1204, 1596, 1204, 1449
        126, 161,  63,  21, 105, 126  // 861, 1106,  420,  126,  714,  861
    }));

    // See dequantized values to the right.
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>(
    {
         81,  39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
        255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

namespace
{
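// Shared helper for the quantized multiplication tests below: builds input/output tensors with the given
// shapes and quantization parameters, runs a Multiplication workload, and returns actual vs. expected output.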
LayerTestResult<uint8_t, 4> MultiplicationUint8TestHelper(armnn::IWorkloadFactory& workloadFactory,
                                                          const unsigned int shape0[4],
                                                          const std::vector<uint8_t>& values0,
                                                          float scale0,
                                                          int32_t offset0,
                                                          const unsigned int shape1[4],
                                                          const std::vector<uint8_t>& values1,
                                                          float scale1,
                                                          int32_t offset1,
                                                          const unsigned int outShape[4],
                                                          const std::vector<uint8_t>& outValues,
                                                          float outScale,
                                                          int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1(4, shape1, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo outputTensorInfo(4, outShape, armnn::DataType::QuantisedAsymm8);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<uint8_t, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, values1);

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<uint8_t, 4> MultiplicationUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    unsigned int batchSize = 1;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;
    const unsigned int shape[] = { batchSize, channels, height, width };

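    // real = scale * (quantized - offset): 4.0f * (62 - 1) = 244 for input0 and 3.0f * (126 - (-2)) = 384
    // for input1; the product 244 * 384 = 93696 re-quantizes to 93696 / 1366.255f + (-5) ~= 64.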
    // See dequantized values to the right.
    std::vector<uint8_t> input0({
         62,  37,   3, 172,  13, 111, // 244, 144,   8, 684,  48, 440,
        188,  20,  73,  31,  23,  31  // 748,  76, 288, 120,  88, 120
    });

    // See dequantized values to the right.
    std::vector<uint8_t> input1({
        126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
         48, 115, 151,  79,  78,  97  // 150, 351, 459, 243, 240, 297
    });

    // See dequantized values to the right.
    std::vector<uint8_t> output({
         64,  72,   0, 255,   8, 236, //  93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
         77,  15,  92,  16,  10,  21, // 112200,  26676, 132192, 29160, 21120, 35640
    });

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape,
                                         input0,
                                         4.0f,
                                         1,
                                         shape,
                                         input1,
                                         3.0f,
                                         -2,
                                         shape,
                                         output,
                                         1366.255f, // Scale/offset chosen to have output values out of range.
                                         -5);
}

LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

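    // input1 is a single value broadcast against every element of input0; all tensors use the
    // identity quantization (scale 1.0f, offset 0), so the expected outputs are the raw products.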
    std::vector<uint8_t> input0({
        1, 2, 3,  4,  5,  6,
        7, 8, 9, 10, 11, 12
    });

    std::vector<uint8_t> input1({ 2 });

    std::vector<uint8_t> output({
         2,  4,  6,  8, 10, 12,
        14, 16, 18, 20, 22, 24
    });

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape0,
                                         input0,
                                         1.0f,
                                         0,
                                         shape1,
                                         input1,
                                         1.0f,
                                         0,
                                         shape0,
                                         output,
                                         1.0f,
                                         0);
}

LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 3 };

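    // input1 holds one value per column and is broadcast across the rows of each channel:
    // 1*1, 2*2, 3*3 for the first row, 4*1, 5*2, 6*3 for the second, and so on.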
    std::vector<uint8_t> input0({
        1, 2, 3,  4,  5,  6,
        7, 8, 9, 10, 11, 12
    });

    std::vector<uint8_t> input1({ 1, 2, 3 });

    std::vector<uint8_t> output({
        1,  4,  9,  4, 10, 18,
        7, 16, 27, 10, 22, 36
    });

    return MultiplicationUint8TestHelper(workloadFactory,
                                         shape0,
                                         input0,
                                         1.0f,
                                         0,
                                         shape1,
                                         input1,
                                         1.0f,
                                         0,
                                         shape0,
                                         output,
                                         1.0f,
                                         0);
}

namespace
{
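// Shared helper for the subtraction tests: T selects the element type, with the data type deduced as
// QuantisedAsymm8 for uint8_t and Float32 otherwise (the quantization parameters only matter for the former).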
template <typename T>
LayerTestResult<T, 4> SubtractionTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                            const unsigned int shape0[4],
                                            const std::vector<T>& values0,
                                            float scale0,
                                            int32_t offset0,
                                            const unsigned int shape1[4],
                                            const std::vector<T>& values1,
                                            float scale1,
                                            int32_t offset1,
                                            const unsigned int outShape[4],
                                            const std::vector<T>& outValues,
                                            float outScale,
                                            int32_t outOffset)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace

LayerTestResult<uint8_t, 4> SubtractionUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 2 };

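    // Dequantized: input0 = 0.5f * (q - 2) = { 4, 5, 6, 7 } and input1 = { 1, 2, 1, 2 },
    // so the differences { 3, 3, 5, 5 } re-quantize unchanged with scale 1.0f and offset 0.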
    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 1, 2, 1, 2 });
    std::vector<uint8_t> output({ 3, 3, 5, 5 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 0.5f, 2,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 2 });
    std::vector<uint8_t> output({ 5, 6, 7, 8 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 0.5f, 2,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 3);
}

LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 1 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 2, 1 });
    std::vector<uint8_t> output({ 8, 11, 12, 15 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 2 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 1, -1, 0, 2 });
    std::vector<float> output({ 0, 3, 3, 2 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 10 });
    std::vector<float> output({ -9, -8, -7, -6 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<float, 4> SubtractionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 2 };

    std::vector<float> input0({ 1, 2, 3, 4 });
    std::vector<float> input1({ 10, -5 });
    std::vector<float> output({ -9, 7, -7, 9 });

    return SubtractionTestHelper(workloadFactory,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.1567f);
    inputTensorInfo.SetQuantizationOffset(1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.1567f);
    outputTensorInfo.SetQuantizationOffset(1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
          1, 255,
        200, 250
    }));

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if
    // projecting the centre).
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(3.141592f);
    inputTensorInfo.SetQuantizationOffset(3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(3.141592f);
    outputTensorInfo.SetQuantizationOffset(3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3,
        3, 5
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}

LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 2;
    constexpr unsigned int outputHeight = 1;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2,  3, // 3.0, 4.5, 6.0
        5, 8, 13  // 9.0, 13.5, 21.0
    }));

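    // Width scale is 3/2, so output x = 1 samples input x = 1.5, halfway between 4.5 and 6.0 -> 5.25,
    // which re-quantizes as 5.25 / 1.5f + (-1) = 2.5, stored as 3.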
4368 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
4369 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
4370 1, 3 // 3.0, 5.25
4371 }));
4372
4373 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4374 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4375
4376 armnn::ResizeBilinearQueueDescriptor descriptor;
4377 armnn::WorkloadInfo info;
4378 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4379 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4380
4381 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4382
4383 inputHandle->Allocate();
4384 outputHandle->Allocate();
4385
4386 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4387
surmeh013537c2c2018-05-18 16:31:43 +01004388 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00004389 workload->Execute();
4390
4391 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4392 return result;
4393}
4394
4395LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(armnn::IWorkloadFactory& workloadFactory)
4396{
4397 constexpr unsigned int inputWidth = 2;
4398 constexpr unsigned int inputHeight = 3;
4399 constexpr unsigned int inputChannels = 1;
4400 constexpr unsigned int inputBatchSize = 1;
4401
4402 constexpr unsigned int outputWidth = 5;
4403 constexpr unsigned int outputHeight = 3;
4404 constexpr unsigned int outputChannels = inputChannels;
4405 constexpr unsigned int outputBatchSize = inputBatchSize;
4406
4407 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
4408 armnn::DataType::QuantisedAsymm8);
4409 inputTensorInfo.SetQuantizationScale(0.010765f);
4410 inputTensorInfo.SetQuantizationOffset(7);
4411
4412 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
4413 armnn::DataType::QuantisedAsymm8);
4414 outputTensorInfo.SetQuantizationScale(0.010132f);
4415 outputTensorInfo.SetQuantizationOffset(-18);
4416
4417 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
4418 24, 228, // 0.183005, 2.379065,
4419 105, 128, // 1.05497, 1.302565
4420 230, 71 // 2.400595, 0.68896
4421 }));
4422
4423 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
4424 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
4425 0, 87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
4426 86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
4427 219, 151, 84, 50, 50 // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
4428 }));
4429
4430 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4431 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4432
4433 armnn::ResizeBilinearQueueDescriptor descriptor;
4434 armnn::WorkloadInfo info;
4435 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4436 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4437
4438 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4439
4440 inputHandle->Allocate();
4441 outputHandle->Allocate();
4442 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4443
surmeh013537c2c2018-05-18 16:31:43 +01004444 workloadFactory.Finalize();
telsoa014fcda012018-03-09 14:13:49 +00004445 workload->Execute();
4446
4447 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4448 return result;
4449}
4450
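// The wrappers below pin down the element type for each *TestImpl / *TestCommon
// helper; the quantized (uint8_t) variants additionally pass the quantization scale
// and offset that the helper should apply to its test data.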
LayerTestResult<float, 4> BatchNormTest(armnn::IWorkloadFactory& workloadFactory)
{
    return BatchNormTestImpl<float>(workloadFactory, 0.f, 0);
}

LayerTestResult<uint8_t, 4> BatchNormUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return BatchNormTestImpl<uint8_t>(workloadFactory, 1.f/20.f, 50);
}

LayerTestResult<uint8_t, 4> ConstantUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return ConstantTestImpl<uint8_t>(workloadFactory, 2e-6f, 1);
}

LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation1dTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation2dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim2TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return Concatenation3dDim2DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
                                                                 bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                                        bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<uint8_t>(workloadFactory, forceNoPadding, 3.0f, -5);
}

LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory& workloadFactory,
                                                                 bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<float>(workloadFactory, forceNoPadding);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                                        bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<uint8_t>(workloadFactory, forceNoPadding, 0.1f, 128);
}

LayerTestResult<float, 4> SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleAveragePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
                                                                            bool forceNoPadding)
{
    return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
}

LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return LargeTensorsAveragePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return LargeTensorsAveragePooling2dTestCommon<uint8_t>(workloadFactory, 0.5f, -1);
}

LayerTestResult<float, 4> SimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleL2Pooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride1TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride1TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride4TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize3Stride4TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize7Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize7TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize7TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize9Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize9TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return L2Pooling2dSize9TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return AsymmetricNonSquarePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return AsymmetricNonSquarePooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> ComparePooling2dTest(armnn::IWorkloadFactory& workloadFactory,
                                               armnn::IWorkloadFactory& refWorkloadFactory,
                                               armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<float>(workloadFactory, refWorkloadFactory, poolingType);
}

LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                      armnn::IWorkloadFactory& refWorkloadFactory,
                                                      armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<uint8_t>(workloadFactory, refWorkloadFactory, poolingType, 0.1f, 128);
}

LayerTestResult<float, 2> FullyConnectedLargeTest(armnn::IWorkloadFactory& workloadFactory,
                                                  bool transposeWeights)
{
    return FullyConnectedLargeTestCommon<float>(workloadFactory, transposeWeights);
}

LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, 1.0f, -5);
}

LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<uint8_t>(workloadFactory, 1.0f, -5);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<float>(workloadFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<uint8_t>(workloadFactory);
}

LayerTestResult<float, 4> SimplePermuteFloat32Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimplePermuteFloat32TestCommon(workloadFactory);
}

LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    return SimplePermuteUint8TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet1TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet2TestCommon(workloadFactory);
}

LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(armnn::IWorkloadFactory& workloadFactory)
{
    return PermuteFloat32ValueSet3TestCommon(workloadFactory);
}

namespace
{
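// Shared driver for the Mean tests below: builds and runs a Mean workload over the
// given axes and returns both the actual and expected outputs for comparison.
// T selects the data type: uint8_t runs as QuantisedAsymm8 with the supplied
// quantization scale/offset, anything else as Float32.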
template <typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> MeanTestHelper(armnn::IWorkloadFactory& workloadFactory,
                                             const unsigned int* inputShape,
                                             const std::vector<T>& inputData,
                                             const std::vector<unsigned int>& axis,
                                             bool keepDims,
                                             const unsigned int* outputShape,
                                             const std::vector<T>& outputData,
                                             float scale = 1.0f,
                                             int32_t offset = 0)
{
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MeanQueueDescriptor data;
    data.m_Parameters.m_Axis = axis;
    data.m_Parameters.m_KeepDims = keepDims;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());

    return result;
}
} // anonymous namespace

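// In the quantized Mean tests the input and output share the same scale and offset,
// so the expected means can be written directly in the quantized domain, e.g.
// mean{1, 1, 2, 2, 3, 3} = 2 below; fractional means such as 1.5, 3.5, 5.5 are
// expected to come out as 1, 3, 5 (see MeanUint8MultipleDimsTest).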
LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 3, 2 };
    const unsigned int outputShape[] = { 1 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2 });

    return MeanTestHelper<uint8_t, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
}

LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 2 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2, 2 });

    return MeanTestHelper<uint8_t, 4, 3>(workloadFactory, inputShape, input, { 2 }, false, outputShape, output);
}

LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2, 2 });

    return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, { 2 }, true, outputShape, output);
}

LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 1, 3, 1, 1 };

    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
    std::vector<uint8_t> output({ 1, 3, 5 });

    return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, { 0, 3 }, true, outputShape, output);
}

LayerTestResult<uint8_t, 1> MeanVtsUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 2 };

    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                                 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 });
    std::vector<uint8_t> output({ 12, 13 });

    return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, inputShape, input, { 0, 1 }, false, outputShape,
                                         output, 0.8f, 5);
}

LayerTestResult<float, 1> MeanFloatSimpleTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 3, 2 };
    const unsigned int outputShape[] = { 1 };

    std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
    std::vector<float> output({ 2.0f });

    return MeanTestHelper<float, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
}

LayerTestResult<float, 3> MeanFloatSimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 3, 1, 2 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
    std::vector<float> output({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });

    return MeanTestHelper<float, 4, 3>(workloadFactory, inputShape, input, { 0 }, false, outputShape, output);
}

LayerTestResult<float, 4> MeanFloatKeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };

    std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
    std::vector<float> output({ 2.0f, 2.0f });

    return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, { 2 }, true, outputShape, output);
}

LayerTestResult<float, 4> MeanFloatMultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 1, 3, 1, 1 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
    std::vector<float> output({ 1.5f, 3.5f, 5.5f });

    return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, { 0, 3 }, true, outputShape, output);
}

LayerTestResult<float, 1> MeanVtsFloat1Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 2 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
                               13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
    std::vector<float> output({ 12.0f, 13.0f });

    return MeanTestHelper<float, 3, 1>(workloadFactory, inputShape, input, { 0, 1 }, false, outputShape, output);
}

LayerTestResult<float, 3> MeanVtsFloat2Test(armnn::IWorkloadFactory& workloadFactory)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 1, 3, 1 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
                               13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
    std::vector<float> output({ 10.5f, 12.5f, 14.5f });

    return MeanTestHelper<float, 3, 3>(workloadFactory, inputShape, input, { 0, 2 }, true, outputShape, output);
}