//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/ArmNN.hpp>

#include <test/TensorHelpers.hpp>
#include "QuantizeHelper.hpp"

#include <backends/CpuTensorHandle.hpp>
#include <backends/WorkloadFactory.hpp>
#include <backends/WorkloadInfo.hpp>
#include <algorithm>

template<typename T>
LayerTestResult<T, 4> SimplePooling2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    armnn::Pooling2dDescriptor descriptor,
    float qScale,
    int32_t qOffset,
    const boost::multi_array<T, 4>& input,
    const boost::multi_array<T, 4>& outputExpected)
{
    unsigned int inputHeight    = boost::numeric_cast<unsigned int>(input.shape()[2]);
    unsigned int inputWidth     = boost::numeric_cast<unsigned int>(input.shape()[3]);
    unsigned int inputChannels  = boost::numeric_cast<unsigned int>(input.shape()[1]);
    unsigned int inputBatchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);

    unsigned int outputHeight    = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]);
    unsigned int outputWidth     = boost::numeric_cast<unsigned int>(outputExpected.shape()[3]);
    unsigned int outputChannels  = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
    unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    // Don't execute if Pooling is not supported, as an exception will be raised.
    armnn::Compute compute = workloadFactory.GetCompute();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    result.supported = armnn::IsPooling2dSupported(compute, inputTensorInfo, outputTensorInfo,
                                                   queueDescriptor.m_Parameters,
                                                   reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!result.supported)
    {
        return result;
    }

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    result.outputExpected = outputExpected;

    return result;
}

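// A minimal usage sketch (illustrative only - the reference backend factory and the
// Boost.Test assertion below mirror how the per-backend layer tests drive these
// helpers, but the exact fixture is an assumption, not part of this file):
//
//     armnn::RefWorkloadFactory workloadFactory;
//     armnn::Pooling2dDescriptor descriptor;
//     descriptor.m_PoolType  = armnn::PoolingAlgorithm::Max;
//     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
//     descriptor.m_StrideX   = descriptor.m_StrideY    = 2;
//     LayerTestResult<float, 4> ret =
//         SimplePooling2dTestImpl<float>(workloadFactory, descriptor, 1.0f, 0, input, outputExpected);
//     BOOST_TEST(CompareTensors(ret.output, ret.outputExpected));
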
template<typename T>
LayerTestResult<T, 4> SimplePooling2dNhwcTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    armnn::Pooling2dDescriptor descriptor,
    float qScale,
    int32_t qOffset,
    const boost::multi_array<T, 4>& input,
    const boost::multi_array<T, 4>& outputExpected)
{
    unsigned int inputHeight    = boost::numeric_cast<unsigned int>(input.shape()[1]);
    unsigned int inputWidth     = boost::numeric_cast<unsigned int>(input.shape()[2]);
    unsigned int inputChannels  = boost::numeric_cast<unsigned int>(input.shape()[3]);
    unsigned int inputBatchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);

    unsigned int outputHeight    = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
    unsigned int outputWidth     = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]);
    unsigned int outputChannels  = boost::numeric_cast<unsigned int>(outputExpected.shape()[3]);
    unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputHeight, inputWidth, inputChannels },
                                      armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputHeight, outputWidth, outputChannels },
                                       armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    // Don't execute if Pooling is not supported, as an exception will be raised.
    armnn::Compute compute = workloadFactory.GetCompute();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    result.supported = armnn::IsPooling2dSupported(compute, inputTensorInfo, outputTensorInfo,
                                                   queueDescriptor.m_Parameters,
                                                   reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!result.supported)
    {
        return result;
    }

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    result.outputExpected = outputExpected;

    return result;
}

//
// Tests max pooling with the following parameters:
//
//  Pooling size: 3x3
//  Stride: (2,4)
//  Input size: 8x13
//  Channels: 2
//  Batch size: 2
//
template<typename T>
LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                   bool forceNoPadding,
                                                                   float qScale = 1.0f,
                                                                   int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 4;
    // forceNoPadding is mainly used for compatibility with ARM Compute.
    // As of 16/05/2017, ARM Compute raises an error if padX or padY is greater than
    // or equal to the pool size.
    descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
    descriptor.m_PadTop = descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    unsigned int inputWidth = 8;
    unsigned int inputHeight = 13;
    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
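    // Worked example of the floor-rounded size formula above, in both modes:
    //   padded:         outputWidth = (8 + 3 + 3 + 2 - 3) / 2 = 6, outputHeight = (13 + 0 + 0 + 4 - 3) / 4 = 3
    //   forceNoPadding: outputWidth = (8 + 0 + 0 + 2 - 3) / 2 = 3, outputHeight = 3
    // matching the 3x6 and 3x3 per-channel expected outputs below.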
    unsigned int channels = 2;
    unsigned int batchSize = 2;

    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<float> singleChannelData({
        0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
        1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
        8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
        8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
        5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
        1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
        9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
        1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
        6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
        8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
        7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
        4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
        3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
    });

    // Constructs input data.
    std::vector<float> inputData;
    auto negator = [](float f) { return -f; };

    // First image (two channels where the second channel is the negative of the first one).
    inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
    std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);

    // Second image (same as first image).
    inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
    std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));

    // These were calculated manually.
    auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
    boost::multi_array<T, 4> outputExpected(shape);
    if (forceNoPadding)
    {
        outputExpected = MakeTensor<T, 4>(outputTensorInfo,
            QuantizedVector<T>(qScale, qOffset, {
                 8.0f,  8.0f,  8.0f,
                 9.0f,  7.0f,  9.0f,
                 9.0f,  9.0f,  9.0f,

                 0.0f,  0.0f, -3.0f,
                -1.0f,  0.0f,  0.0f,
                -1.0f, -1.0f, -1.0f,

                 8.0f,  8.0f,  8.0f,
                 9.0f,  7.0f,  9.0f,
                 9.0f,  9.0f,  9.0f,

                 0.0f,  0.0f, -3.0f,
                -1.0f,  0.0f,  0.0f,
                -1.0f, -1.0f, -1.0f
            }));
    }
    else
    {
        outputExpected = MakeTensor<T, 4>(outputTensorInfo,
            QuantizedVector<T>(qScale, qOffset, {
                0.0f,  8.0f,  8.0f,  8.0f,  8.0f, 8.0f,
                0.0f,  9.0f,  7.0f,  9.0f,  9.0f, 3.0f,
                0.0f,  8.0f,  9.0f,  9.0f,  9.0f, 9.0f,

                0.0f,  0.0f,  0.0f,  0.0f, -3.0f, 0.0f,
                0.0f, -1.0f,  0.0f,  0.0f,  0.0f, 0.0f,
                0.0f, -1.0f, -1.0f, -1.0f, -1.0f, 0.0f,

                0.0f,  8.0f,  8.0f,  8.0f,  8.0f, 8.0f,
                0.0f,  9.0f,  7.0f,  9.0f,  9.0f, 3.0f,
                0.0f,  8.0f,  9.0f,  9.0f,  9.0f, 9.0f,

                0.0f,  0.0f,  0.0f,  0.0f, -3.0f, 0.0f,
                0.0f, -1.0f,  0.0f,  0.0f,  0.0f, 0.0f,
                0.0f, -1.0f, -1.0f, -1.0f, -1.0f, 0.0f
            }));
    }

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<typename T>
LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       float qScale = 1.0f,
                                                       int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
        }));

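    // With PaddingMethod::Exclude, padded positions do not count towards the divisor:
    // the corner outputs average a single valid element (e.g. top-left = 1.0 / 1), and
    // the top-centre output is (2 + 3) / 2 = 2.5 rather than a full 2x2 average.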
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.5f, 4.0f,
            1.0f, 2.5f, 4.0f,
            1.0f, 2.5f, 4.0f,
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<typename T>
LayerTestResult<T, 4> SimpleAveragePooling2dNhwcTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                           float qScale = 1.0f,
                                                           int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    // Note the NHWC ([batch, height, width, channels]) tensor shapes.
    armnn::TensorInfo inputTensorInfo({ 1, 4, 4, 1 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ 1, 3, 3, 1 }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
        }));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.5f, 4.0f,
            1.0f, 2.5f, 4.0f,
            1.0f, 2.5f, 4.0f,
        }));

    return SimplePooling2dNhwcTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<typename T>
LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                             float qScale = 1.0f,
                                                             int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 100;
    descriptor.m_StrideX = descriptor.m_StrideY = 5;
    descriptor.m_PadLeft = 50;
    descriptor.m_PadRight = 50;
    descriptor.m_PadTop = 50;
    descriptor.m_PadBottom = 50;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    // Average pooling an all-ones tensor with Exclude padding produces all ones
    // whatever the window position, so both the input and the expected output are
    // simply filled with ones.
    std::vector<T> inputVec(inputTensorInfo.GetShape().GetNumElements(), static_cast<T>(1));
    auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec);

    std::vector<T> outputVec(outputTensorInfo.GetShape().GetNumElements(), static_cast<T>(1));
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<typename T>
LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                  float qScale = 1.0f,
                                                  int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 7.0f, 1.0f, 7.0f,
            1.0f, 7.0f, 1.0f, 7.0f,
            1.0f, 7.0f, 1.0f, 7.0f,
            1.0f, 7.0f, 1.0f, 7.0f,
        }));

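    // L2 pooling takes the square root of the mean of the squared window values, so
    // every 2x2 window {1, 7, 1, 7} gives sqrt((1 + 49 + 1 + 49) / 4) = sqrt(25) = 5.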
    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            5.0f, 5.0f,
            5.0f, 5.0f,
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<typename T>
LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                        float qScale = 1.0f,
                                                        int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            2.0f, 1.0f, 5.0f, 2.0f,
            1.0f, 2.0f, 2.0f, 1.0f,
            5.0f, 4.0f, 1.0f, 5.0f,
            2.0f, 1.0f, 5.0f, 2.0f,
        }));

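    // Each 3x3 window of this input has squared values summing to 81,
    // so every output is sqrt(81 / 9) = 3.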
    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            3.0f, 3.0f,
            3.0f, 3.0f,
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<typename T>
LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                        float qScale = 1.0f,
                                                        int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 3;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
        }));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            3.0f, 3.0f, 3.0f,
            3.0f, 3.0f, 3.0f,
            3.0f, 3.0f, 3.0f,
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<typename T>
LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                        float qScale = 1.0f,
                                                        int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 4;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
        }));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            3.0f, 3.0f,
            3.0f, 3.0f,
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<typename T>
LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                 float qScale = 1.0f,
                                                 int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 7;
    descriptor.m_StrideX = descriptor.m_StrideY = 7;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f,
            8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
            0.0f, 5.0f, 0.0f, 2.0f, 0.0f, 1.0f, 1.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        }));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, armnn::GetDataType<T>());
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            3.0f,
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<typename T>
LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                 float qScale = 1.0f,
                                                 int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 9;
    descriptor.m_StrideX = descriptor.m_StrideY = 9;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
        }));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, armnn::GetDataType<T>());
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            3.0f,
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<typename T>
LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                             float qScale = 1.0f,
                                                             int32_t qOffset = 0)
{
    armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());

    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = 2;
    descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 2;
    descriptor.m_PadRight = 0;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 2;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    // Construct input data.
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 3.0f, 4.0f,
        }));

    // These were calculated manually (laid out here as the 2x2 output).
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            0.0f, 3.0f,
            0.0f, 3.0f,
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<typename T>
LayerTestResult<T, 4> ComparePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                 armnn::IWorkloadFactory& refWorkloadFactory,
                                                 armnn::PoolingAlgorithm poolingType,
                                                 float qScale = 1.0f,
                                                 int32_t qOffset = 0)
{
    const unsigned int inputWidth = 16;
    const unsigned int inputHeight = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    const unsigned int poolSize = 3;
    const unsigned int strideX = 2;
    const unsigned int strideY = 4;
    const unsigned int padX = 0;
    const unsigned int padY = 0;

    const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
    const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
    unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715);

    LayerTestResult<T, 4> comparisonResult(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Pooling2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_PoolType = poolingType;
    data.m_Parameters.m_PoolWidth = poolSize;
    data.m_Parameters.m_PoolHeight = poolSize;
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padX;
    data.m_Parameters.m_PadRight = padX;
    data.m_Parameters.m_PadTop = padY;
    data.m_Parameters.m_PadBottom = padY;
    data.m_Parameters.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);

    // Don't execute if Pooling is not supported, as an exception will be raised.
    armnn::Compute compute = workloadFactory.GetCompute();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    comparisonResult.supported = armnn::IsPooling2dSupported(compute, inputTensorInfo, outputTensorInfo,
                                                             data.m_Parameters,
                                                             reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!comparisonResult.supported)
    {
        return comparisonResult;
    }

    armnn::Pooling2dQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling2d(refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}

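// A minimal sketch of how this comparison helper might be driven, assuming a Neon
// factory under test against the CPU reference factory (the factory type names are
// illustrative, not taken from this file):
//
//     armnn::NeonWorkloadFactory workloadFactory;
//     armnn::RefWorkloadFactory refWorkloadFactory;
//     auto ret = ComparePooling2dTestCommon<float>(workloadFactory, refWorkloadFactory,
//                                                  armnn::PoolingAlgorithm::Average);
//     BOOST_TEST(CompareTensors(ret.output, ret.outputExpected));
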
//
// Tests max pooling with the following parameters:
//
//  Pooling size: 2x2
//  Stride: (2,2)
//  Input size: 4x4
//  Channels: 1
//  Batch size: 1
//
template<typename T>
LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                   bool forceNoPadding,
                                                                   float qScale = 1.0f,
                                                                   int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
    descriptor.m_PadTop = descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    unsigned int inputWidth = 4;
    unsigned int inputHeight = 4;
    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
    unsigned int channels = 1;
    unsigned int batchSize = 1;

    std::vector<float> inputData = {
        510.0f, 222.0f, 780.0f, 654.0f,
        141.0f, 276.0f, 15.0f, 546.0f,
        303.0f, 618.0f, 582.0f, 339.0f,
        438.0f, 564.0f, 573.0f, 402.0f
    };

    // Note that the left and right edge outputs are 0.0f, because those 2x2 max
    // pooling windows cover only padded positions.
    std::vector<float> expectedOutputDataWithPadding = {
        0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
        0.0f, 438.0f, 618.0f, 402.0f, 0.0f
    };

    std::vector<float> expectedOutputDataNoPadding = {
        510.0f, 780.0f,
        618.0f, 582.0f
    };

    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());

    // Scale and offset should match input - we're just calculating maximum values.
    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) :
                         QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}

//
// Tests average pooling with the following parameters:
//
//  Pooling size: 3x2
//  Stride: (2,2)
//  Input size: 3x2
//  Channels: 1
//  Batch size: 1
//
template<typename T>
LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
        armnn::IWorkloadFactory& workloadFactory,
        bool forceNoPadding,
        float qScale = 1.0f,
        int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = 3;
    descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = (forceNoPadding) ? 0 : 1;
    descriptor.m_PadRight = descriptor.m_PadLeft;
    descriptor.m_PadTop = 0;
    descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    unsigned int inputWidth = 3;
    unsigned int inputHeight = 2;
    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
    unsigned int channels = 1;
    unsigned int batchSize = 1;

    std::vector<float> inputData = {
        3.0f, 6.0f, 9.0f,
        12.0f, 15.0f, 18.0f,
    };

    std::vector<float> expectedOutputDataWithPadding = {
        6.0f, 8.0f,
    };

    std::vector<float> expectedOutputDataNoPadding = {
        10.5f,
    };

    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());

    // Scale and offset should match input - we're just calculating average values.
    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) :
                         QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<typename T>
LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                float qScale = 1.0f,
                                                                int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            -1.0f, -2.0f,  3.0f,  4.0f,
            -1.0f, -2.0f,  3.0f,  4.0f,
             1.0f,  2.0f, -3.0f, -4.0f,
             1.0f,  2.0f, -3.0f, -4.0f,
        }));

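    // For max pooling the padded positions are ignored outright rather than treated
    // as zeros, which is why the top-left output below is -1.0 (the only valid
    // element in its window) and not 0.0.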
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            -1.0f, 3.0f,  4.0f,
             1.0f, 3.0f,  4.0f,
             1.0f, 2.0f, -4.0f,
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<typename T>
LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                               float qScale = 1.0f,
                                                               int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            -1.0f, -2.0f,  3.0f,  4.0f,
            -1.0f, -2.0f,  3.0f,  4.0f,
             1.0f,  2.0f, -3.0f, -4.0f,
             1.0f,  2.0f, -3.0f, -4.0f,
        }));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            -1.0f, 3.0f, 4.0f,  4.0f,
             2.0f, 3.0f, 4.0f,  4.0f,
             2.0f, 3.0f, 4.0f,  4.0f,
             2.0f, 2.0f, 2.0f, -3.0f,
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<typename T>
LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                    float qScale = 1.0f,
                                                                    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            12.0f, 20.0f, 32.0f, 40.0f,
            12.0f, 20.0f, 32.0f, 40.0f,
            12.0f, 20.0f, 32.0f, 40.0f,
            12.0f, 20.0f, 32.0f, 40.0f,
        }));

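    // With PaddingMethod::IgnoreValue the padded positions contribute zero to the sum
    // but still count towards the divisor, so e.g. the top-left output is
    // (0 + 0 + 0 + 12) / 4 = 3 and the middle-left output is (12 + 12) / 4 = 6.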
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            3.0f, 13.0f, 10.0f,
            6.0f, 26.0f, 20.0f,
            3.0f, 13.0f, 10.0f,
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<typename T>
LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                             float qScale = 1.0f,
                                                                             int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = 0;
    descriptor.m_PadRight = 0;
    descriptor.m_PadTop = 0;
    descriptor.m_PadBottom = 0;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Ceiling;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
        }));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            2.0f, 3.5f,
            2.0f, 3.5f
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<typename T>
LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                   float qScale = 1.0f,
                                                                   int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
             9.0f, 27.0f, 18.0f, 36.0f,
            18.0f,  9.0f, 18.0f,  9.0f,
            27.0f, 18.0f,  9.0f, 27.0f,
             9.0f, 27.0f,  9.0f, 18.0f,
        }));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
             7.0f, 11.0f, 13.0f,  9.0f,
            12.0f, 17.0f, 19.0f, 13.0f,
            12.0f, 16.0f, 16.0f, 10.0f,
             9.0f, 11.0f, 12.0f,  7.0f,
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<typename T>
LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                               float qScale = 1.0f,
                                                               int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
             2.0f, 4.0f, 8.0f, 16.0f,
             4.0f, 2.0f, 2.0f,  4.0f,
             8.0f, 2.0f, 4.0f,  2.0f,
            16.0f, 2.0f, 2.0f,  8.0f,
        }));

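    // As with average pooling above, IgnoreValue counts the padded zeros in the
    // divisor, so the top-left output is sqrt((0 + 0 + 0 + 2*2) / 4) = 1.0; the
    // remaining entries follow the same calculation (written to 3-4 decimal places).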
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0f,    4.4721f, 8.0f,
            4.4721f, 2.6457f, 2.236f,
            8.0f,    1.4142f, 4.0f,
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<typename T>
LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                              float qScale = 1.0f,
                                                              int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
        }));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0540f, 1.7638f, 2.5385f, 2.3570f,
            1.2909f, 2.1602f, 3.1091f, 2.8867f,
            1.2909f, 2.1602f, 3.1091f, 2.8867f,
            1.0540f, 1.7638f, 2.5385f, 2.3570f,
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}